Publications
Search
Nurunnabi, Abdul; Teferle, Felicia; Laefer, Debra F.; Chen, Meida; Ali, Mir Masoom
Development of a Precise Tree Structure from LiDAR Point Clouds Journal Article
In: Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci., vol. XLVIII-2-2024, pp. 301–308, 2024, ISSN: 2194-9034.
@article{nurunnabi_development_2024,
  title     = {Development of a Precise Tree Structure from {LiDAR} Point Clouds},
  author    = {Abdul Nurunnabi and Felicia Teferle and Debra F. Laefer and Meida Chen and Mir Masoom Ali},
  url       = {https://isprs-archives.copernicus.org/articles/XLVIII-2-2024/301/2024/},
  doi       = {10.5194/isprs-archives-XLVIII-2-2024-301-2024},
  issn      = {2194-9034},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-07-11},
  journal   = {Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci.},
  volume    = {XLVIII-2-2024},
  pages     = {301--308},
  abstract  = {A precise tree structure that represents the distribution of tree stem, branches, and leaves is crucial for accurately capturing the full representation of a tree. Light Detection and Ranging (LiDAR)-based three-dimensional (3D) point clouds (PCs) capture the geometry of scanned objects including forests stands and individual trees. PCs are irregular, unstructured, often noisy, and contaminated by outliers. Researchers have struggled to develop methods to separate leaves and wood without losing the tree geometry. This paper proposes a solution that employs only the spatial coordinates (x, y, z) of the PC. The new algorithm works as a filtering approach, utilizing multi-scale neighborhood-based geometric features (GFs) e.g., linearity, planarity, and verticality to classify linear (wood) and non-linear (leaf) points. This involves finding potential wood points and coupling them with an octree-based segmentation to develop a tree architecture. The main contributions of this paper are (i) investigating the potential of different GFs to split linear and non-linear points, (ii) introducing a novel method that pointwise classifies leaf and wood points, and (iii) developing a precise 3D tree structure. The performance of the new algorithm has been demonstrated through terrestrial laser scanning PCs. For a Scots pine tree, the new method classifies leaf and wood points with an overall accuracy of 97.9%.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Chemburkar, Ankur; Gordon, Andrew; Feng, Andrew
Evaluating Vision-Language Models on the TriangleCOPA Benchmark Journal Article
In: FLAIRS-37, vol. 37, 2024.
@article{chemburkar_evaluating_2024,
  title     = {Evaluating Vision-Language Models on the {TriangleCOPA} Benchmark},
  author    = {Ankur Chemburkar and Andrew Gordon and Andrew Feng},
  year      = {2024},
  date      = {2024-05-01},
  journal   = {FLAIRS-37},
  volume    = {37},
  abstract  = {The TriangleCOPA benchmark consists of 100 textual questions with videos depicting the movements of simple shapes in the style of the classic social-psychology film created by Fritz Heider and Marianne Simmel in 1944. In our experiments, we investigate the performance of current vision-language models on this challenging benchmark, assessing the capability of these models for visual anthropomorphism and abstract interpretation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gordon, Andrew S.; Feng, Andrew
Combining the Predictions of Out-of-Domain Classifiers Using Etcetera Abduction Proceedings Article
In: 2024 58th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Princeton, NJ, USA, 2024, ISBN: 9798350369298.
@inproceedings{gordon_combining_2024,
  title     = {Combining the Predictions of Out-of-Domain Classifiers Using {Etcetera Abduction}},
  author    = {Andrew S. Gordon and Andrew Feng},
  url       = {https://ieeexplore.ieee.org/document/10480194/},
  doi       = {10.1109/CISS59072.2024.10480194},
  isbn      = {9798350369298},
  year      = {2024},
  date      = {2024-03-01},
  urldate   = {2024-04-16},
  booktitle = {2024 58th Annual Conference on Information Sciences and Systems (CISS)},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Princeton, NJ, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yu, Zifan; Tavakoli, Erfan Bank; Chen, Meida; You, Suya; Rao, Raghuveer; Agarwal, Sanjeev; Ren, Fengbo
TokenMotion: Motion-Guided Vision Transformer for Video Camouflaged Object Detection Via Learnable Token Selection Miscellaneous
2024, (arXiv:2311.02535 [cs]).
@misc{yu_tokenmotion_2024,
  title      = {{TokenMotion}: Motion-Guided Vision Transformer for Video Camouflaged Object Detection Via Learnable Token Selection},
  author     = {Zifan Yu and Erfan Bank Tavakoli and Meida Chen and Suya You and Raghuveer Rao and Sanjeev Agarwal and Fengbo Ren},
  url        = {http://arxiv.org/abs/2311.02535},
  eprint     = {2311.02535},
  eprinttype = {arXiv},
  year       = {2024},
  date       = {2024-02-01},
  urldate    = {2024-02-21},
  publisher  = {arXiv},
  abstract   = {The area of Video Camouflaged Object Detection (VCOD) presents unique challenges in the field of computer vision due to texture similarities between target objects and their surroundings, as well as irregular motion patterns caused by both objects and camera movement. In this paper, we introduce TokenMotion (TMNet), which employs a transformer-based model to enhance VCOD by extracting motion-guided features using a learnable token selection. Evaluated on the challenging MoCA-Mask dataset, TMNet achieves state-of-the-art performance in VCOD. It outperforms the existing state-of-the-art method by a 12.8% improvement in weighted F-measure, an 8.4% enhancement in S-measure, and a 10.7% boost in mean IoU. The results demonstrate the benefits of utilizing motion-guided features via learnable token selection within a transformer-based framework to tackle the intricate task of VCOD.},
  note       = {arXiv:2311.02535 [cs]},
  keywords   = {},
  pubstate   = {published},
  tppubtype  = {misc}
}
Liu, Ziming; Suen, Christine Wun Ki; Zou, Zhengbo; Chen, Meida; Shi, Yangming
Assessing Workers’ Operational Postures via Egocentric Camera Mapping Proceedings Article
In: Computing in Civil Engineering 2023, pp. 17–24, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8522-4.
@inproceedings{liu_assessing_2024,
  title     = {Assessing Workers’ Operational Postures via Egocentric Camera Mapping},
  author    = {Ziming Liu and Christine Wun Ki Suen and Zhengbo Zou and Meida Chen and Yangming Shi},
  url       = {https://ascelibrary.org/doi/10.1061/9780784485224.003},
  doi       = {10.1061/9780784485224.003},
  isbn      = {978-0-7844-8522-4},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-03-19},
  booktitle = {Computing in Civil Engineering 2023},
  pages     = {17--24},
  publisher = {American Society of Civil Engineers},
  address   = {Corvallis, Oregon},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Timothy S.; Gordon, Andrew S.
Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5 Book Section
In: Holloway-Attaway, Lissa; Murray, John T. (Ed.): Interactive Storytelling, vol. 14384, pp. 297–305, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-47657-0 978-3-031-47658-7, (Series Title: Lecture Notes in Computer Science).
@incollection{holloway-attaway_playing_2023,
  title     = {Playing Story Creation Games with Large Language Models: Experiments with {GPT-3.5}},
  author    = {Timothy S. Wang and Andrew S. Gordon},
  editor    = {Lissa Holloway-Attaway and John T. Murray},
  url       = {https://link.springer.com/10.1007/978-3-031-47658-7_28},
  doi       = {10.1007/978-3-031-47658-7_28},
  isbn      = {978-3-031-47657-0 978-3-031-47658-7},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  booktitle = {Interactive Storytelling},
  series    = {Lecture Notes in Computer Science},
  volume    = {14384},
  pages     = {297--305},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gordon, Andrew S.; Feng, Andrew
Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction Proceedings Article
In: 2023 57th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Baltimore, MD, USA, 2023, ISBN: 978-1-66545-181-9.
@inproceedings{gordon_searching_2023,
  title     = {Searching for the Most Probable Combination of Class Labels Using {Etcetera Abduction}},
  author    = {Andrew S. Gordon and Andrew Feng},
  url       = {https://ieeexplore.ieee.org/document/10089729/},
  doi       = {10.1109/CISS56502.2023.10089729},
  isbn      = {978-1-66545-181-9},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-08-07},
  booktitle = {2023 57th Annual Conference on Information Sciences and Systems (CISS)},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Baltimore, MD, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, Meida; Hu, Qingyong; Yu, Zifan; Thomas, Hugues; Feng, Andrew; Hou, Yu; McCullough, Kyle; Ren, Fengbo; Soibelman, Lucio
STPLS3D: A Large-Scale Synthetic and Real Aerial Photogrammetry 3D Point Cloud Dataset Miscellaneous
2022, (arXiv:2203.09065 [cs]).
@misc{chen_stpls3d_2022,
  title      = {{STPLS3D}: A Large-Scale Synthetic and Real Aerial Photogrammetry {3D} Point Cloud Dataset},
  author     = {Meida Chen and Qingyong Hu and Zifan Yu and Hugues Thomas and Andrew Feng and Yu Hou and Kyle McCullough and Fengbo Ren and Lucio Soibelman},
  url        = {http://arxiv.org/abs/2203.09065},
  eprint     = {2203.09065},
  eprinttype = {arXiv},
  year       = {2022},
  date       = {2022-10-01},
  urldate    = {2023-08-22},
  publisher  = {arXiv},
  abstract   = {Although various 3D datasets with different functions and scales have been proposed recently, it remains challenging for individuals to complete the whole pipeline of large-scale data collection, sanitization, and annotation. Moreover, the created datasets usually suffer from extremely imbalanced class distribution or partial low-quality data samples. Motivated by this, we explore the procedurally synthetic 3D data generation paradigm to equip individuals with the full capability of creating large-scale annotated photogrammetry point clouds. Specifically, we introduce a synthetic aerial photogrammetry point clouds generation pipeline that takes full advantage of open geospatial data sources and off-the-shelf commercial packages. Unlike generating synthetic data in virtual games, where the simulated data usually have limited gaming environments created by artists, the proposed pipeline simulates the reconstruction process of the real environment by following the same UAV flight pattern on different synthetic terrain shapes and building densities, which ensure similar quality, noise pattern, and diversity with real data. In addition, the precise semantic and instance annotations can be generated fully automatically, avoiding the expensive and time-consuming manual annotation. Based on the proposed pipeline, we present a richly-annotated synthetic 3D aerial photogrammetry point cloud dataset, termed STPLS3D, with more than 16 $km^2$ of landscapes and up to 18 fine-grained semantic categories. For verification purposes, we also provide a parallel dataset collected from four areas in the real environment. Extensive experiments conducted on our datasets demonstrate the effectiveness and quality of the proposed synthetic dataset.},
  note       = {arXiv:2203.09065 [cs]},
  keywords   = {},
  pubstate   = {published},
  tppubtype  = {misc}
}
Gordon, Andrew S.; Wang, Timothy S.
Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates Book Section
In: Mitchell, Alex; Vosmeer, Mirjam (Ed.): Interactive Storytelling, vol. 13138, pp. 71–79, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-92299-3 978-3-030-92300-6.
@incollection{gordon_narrative_2021,
  title     = {Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates},
  author    = {Andrew S. Gordon and Timothy S. Wang},
  editor    = {Alex Mitchell and Mirjam Vosmeer},
  url       = {https://link.springer.com/10.1007/978-3-030-92300-6_7},
  doi       = {10.1007/978-3-030-92300-6_7},
  isbn      = {978-3-030-92299-3 978-3-030-92300-6},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-09-22},
  booktitle = {Interactive Storytelling},
  volume    = {13138},
  pages     = {71--79},
  publisher = {Springer International Publishing},
  address   = {Cham},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
@article{chen_3d_2020,
  title     = {{3D} Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
  author    = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
  url       = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
  doi       = {10.1061/(ASCE)CP.1943-5487.0000929},
  issn      = {0887-3801, 1943-5487},
  year      = {2020},
  date      = {2020-11-01},
  journal   = {Journal of Computing in Civil Engineering},
  volume    = {34},
  number    = {6},
  abstract  = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gordon, Andrew S.; Miller, Rob; Morgenstern, Leora; Turán, György
Preface: Special Issue on Commonsense Reasoning, Annals of Mathematics and Artificial Intelligence Journal Article
In: Annals of Mathematics and Artificial Intelligence, 2020, ISSN: 1012-2443, 1573-7470.
@article{gordon_preface_2020,
  title     = {Preface: Special Issue on {Commonsense Reasoning}, {Annals of Mathematics and Artificial Intelligence}},
  author    = {Andrew S. Gordon and Rob Miller and Leora Morgenstern and György Turán},
  url       = {http://link.springer.com/10.1007/s10472-020-09711-5},
  doi       = {10.1007/s10472-020-09711-5},
  issn      = {1012-2443, 1573-7470},
  year      = {2020},
  date      = {2020-09-01},
  journal   = {Annals of Mathematics and Artificial Intelligence},
  abstract  = {A few years after the 1956 Dartmouth Summer Workshop [1, 2], which first established artificial intelligence as a field of research, John McCarthy [3] discussed the importance of explicitly representing and reasoning with commonsense knowledge to the enterprise of creating artificially intelligent robots and agents. McCarthy proposed that commonsense knowledge was best represented using formal logic, which he viewed as a uniquely powerful lingua franca that could be used to express and reason with virtually any sort of information that humans might reason with when problem solving, a stance he further developed and propounded in [4, 5]. This approach, the formalist or logic-based approach to commonsense reasoning, was practiced by an increasing set of adherents over the next several decades [6, 7], and continues to be represented by the Commonsense Symposium Series, first held in 1991 [8] and held biennially, for the most part, after that.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Feng, Andrew; Gordon, Andrew S.
Recognizing Multiplayer Behaviors Using Synthetic Training Data Proceedings Article
In: 2020 IEEE Conference on Games (CoG), pp. 463–470, 2020, (ISSN: 2325-4289).
@inproceedings{feng_recognizing_2020,
  title     = {Recognizing Multiplayer Behaviors Using Synthetic Training Data},
  author    = {Andrew Feng and Andrew S. Gordon},
  doi       = {10.1109/CoG47356.2020.9231742},
  issn      = {2325-4289},
  year      = {2020},
  date      = {2020-08-01},
  booktitle = {2020 IEEE Conference on Games (CoG)},
  pages     = {463--470},
  abstract  = {Accurate recognition of group behaviors is essential to the design of engaging networked multiplayer games. However, contemporary data-driven machine learning solutions are difficult to apply during the game development process, given that no authentic gameplay data is yet available for use as training data. In this paper, we investigate the use of synthetic training data, i.e., gameplay data that is generated by AI-controlled agent teams programmed to perform each of the behaviors to be recognized in groups of human players. The particular task we focus on is to recognize group movement formations in player-controlled avatars in a realistic virtual world. We choose five typical military team movement patterns for the formation recognition task and train machine learning models using procedurally generated unit trajectories as training data. The experiments were conducted using ResNet and EfficientNet, which are two popular convolutional neural network architectures for image classifications. The synthetic data is augmented by creating variations in image rotation, unit spacing, team size, and positional perturbations to bridge the gap between synthetic and human gameplay data. We demonstrate that high-accuracy behavior recognition can be achieved using deep neural networks by applying the aforementioned data augmentation methods to simulated gameplay data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), pp. 13, ResearchGate, Orlando, FL, 2020.
@inproceedings{chen_fully_2020,
  title     = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
  author    = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
  url       = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
  year      = {2020},
  date      = {2020-01-01},
  booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
  pages     = {13},
  address   = {Orlando, FL},
  abstract  = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with low-cost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras, and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot allow the sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segment Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
@inproceedings{feng_latent_2019,
  title     = {Latent Terrain Representations for Trajectory Prediction},
  author    = {Andrew Feng and Andrew S. Gordon},
  url       = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
  doi       = {10.1145/3356392.3365218},
  isbn      = {978-1-4503-6951-0},
  year      = {2019},
  date      = {2019-11-01},
  booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
  pages     = {1--4},
  publisher = {ACM Press},
  address   = {Chicago, IL, USA},
  abstract  = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shree, Jaya; Liu, Emily; Gordon, Andrew; Hobbs, Jerry
Deep Natural Language Understanding of News Text Proceedings Article
In: Proceedings of the First Workshop on Narrative Understanding, pp. 19–27, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
@inproceedings{shree_deep_2019,
  title     = {Deep Natural Language Understanding of News Text},
  author    = {Jaya Shree and Emily Liu and Andrew Gordon and Jerry Hobbs},
  url       = {https://www.aclweb.org/anthology/papers/W/W19/W19-2403/},
  doi       = {10.18653/v1/W19-2403},
  year      = {2019},
  date      = {2019-06-01},
  booktitle = {Proceedings of the First Workshop on Narrative Understanding},
  pages     = {19--27},
  publisher = {Association for Computational Linguistics},
  address   = {Minneapolis, Minnesota},
  abstract  = {Early proposals for the deep understanding of natural language text advocated an approach of “interpretation as abduction,” where the meaning of a text was derived as an explanation that logically entailed the input words, given a knowledge base of lexical and commonsense axioms. While most subsequent NLP research has instead pursued statistical and data-driven methods, the approach of interpretation as abduction has seen steady advancements in both theory and software implementations. In this paper, we summarize advances in deriving the logical form of the text, encoding commonsense knowledge, and technologies for scalable abductive reasoning. We then explore the application of these advancements to the deep understanding of a paragraph of news text, where the subtle meaning of words and phrases are resolved by backward chaining on a knowledge base of 80 hand-authored axioms.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Spierling, Ulrike
Playing Story Creation Games with Logical Abduction Book Section
In: Interactive Storytelling, vol. 11318, pp. 478–482, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-030-04027-7 978-3-030-04028-4.
@incollection{gordon_playing_2018,
  title     = {Playing Story Creation Games with Logical Abduction},
  author    = {Andrew S. Gordon and Ulrike Spierling},
  url       = {http://link.springer.com/10.1007/978-3-030-04028-4_55},
  doi       = {10.1007/978-3-030-04028-4_55},
  isbn      = {978-3-030-04027-7 978-3-030-04028-4},
  year      = {2018},
  date      = {2018-11-01},
  booktitle = {Interactive Storytelling},
  volume    = {11318},
  pages     = {478--482},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {Story Creation Games, such as Rory’s Story Cubes and the Tell Tale card game, require players to invent creative and coherent narratives from a set of unconnected elements assembled by random chance, e.g., the throw of a die or the draw of a card. We model this human ability as a process of logical abduction, where the reasoning task is to identify a set of assumptions about a fictional world that logically entail the elements depicted on the dice or on the cards. We demonstrate the feasibility of this approach by hand-authoring a knowledge base of axioms that is sufficient to generate eight creative narratives each related to three Tell Tale cards, depicting a baseball player, a heart, and a train.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gordon, Andrew S
Interpretation of the Heider-Simmel Film Using Incremental Etcetera Abduction Proceedings Article
In: Proceedings of the 6th Annual Conference on Advances in Cognitive Systems, Cognitive Systems Foundation, Stanford, CA, 2018.
@inproceedings{gordon_interpretation_2018,
  title     = {Interpretation of the {Heider-Simmel} Film Using Incremental {Etcetera Abduction}},
  author    = {Andrew S. Gordon},
  url       = {http://www.cogsys.org/journal/volume-7},
  year      = {2018},
  date      = {2018-08-01},
  booktitle = {Proceedings of the 6th Annual Conference on Advances in Cognitive Systems},
  publisher = {Cognitive Systems Foundation},
  address   = {Stanford, CA},
  abstract  = {In 1944, psychologists Fritz Heider and Marianne Simmel created a short, 90-second animated film depicting two triangles and a circle moving around a box with a hinged opening, and reported how subjects viewing the film anthropomorphized the three shapes as characters with humanlike goals, emotions, and social relationships. In this paper we model this type of high-level reasoning as a process of probability-ordered logical abduction (Etcetera Abduction), where the interpretation of the film is incrementally constructed by disambiguating observed movements in the contexts of multiple running hypotheses. We describe a target interpretation and knowledge base that we used in a series of experiments to investigate the effects of varying the window size and number of running hypotheses maintained during the interpretation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew
Linguistic Features of Helpfulness in Automated Support for Creative Writing Proceedings Article
In: Proceedings of the First Workshop on Storytelling, pp. 14–19, 2018 Association for Computational Linguistics, New Orleans, LA, 2018.
@inproceedings{roemmele_linguistic_2018,
  title     = {Linguistic Features of Helpfulness in Automated Support for Creative Writing},
  author    = {Melissa Roemmele and Andrew Gordon},
  url       = {http://aclweb.org/anthology/W18-1502},
  year      = {2018},
  date      = {2018-06-01},
  booktitle = {Proceedings of the First Workshop on Storytelling},
  pages     = {14--19},
  publisher = {Association for Computational Linguistics},
  address   = {New Orleans, LA},
  abstract  = {We examine an emerging NLP application that supports creative writing by automatically suggesting continuing sentences in a story. The application tracks users’ modifications to generated sentences, which can be used to quantify their “helpfulness” in advancing the story. We explore the task of predicting helpfulness based on automatically detected linguistic features of the suggestions. We illustrate this analysis on a set of user interactions with the application using an initial selection of features relevant to story generation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew
An Encoder-decoder Approach to Predicting Causal Relations in Stories Proceedings Article
In: Proceedings of the First Workshop on Storytelling, pp. 50–59, Association for Computational Linguistics, New Orleans, LA, 2018.
@inproceedings{roemmele_encoder-decoder_2018,
  title     = {An Encoder-decoder Approach to Predicting Causal Relations in Stories},
  author    = {Melissa Roemmele and Andrew Gordon},
  url       = {http://aclweb.org/anthology/W18-1506},
  year      = {2018},
  date      = {2018-06-01},
  booktitle = {Proceedings of the First Workshop on Storytelling},
  pages     = {50--59},
  publisher = {Association for Computational Linguistics},
  address   = {New Orleans, LA},
  abstract  = {We address the task of predicting causally related events in stories according to a standard evaluation framework, the Choice of Plausible Alternatives (COPA). We present a neural encoder-decoder model that learns to predict relations between adjacent sequences in stories as a means of modeling causality. We explore this approach using different methods for extracting and representing sequence pairs as well as different model architectures. We also compare the impact of different training datasets on our model. In particular, we demonstrate the usefulness of a corpus not previously applied to COPA, the ROCStories corpus. While not state-of-the-art, our results establish a new reference point for systems evaluated on COPA, and one that is particularly informative for future neural-based approaches.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Weber, René; Mangus, J. Michael; Huskey, Richard; Hopp, Frederic R.; Amir, Ori; Swanson, Reid; Gordon, Andrew; Khooshabeh, Peter; Hahn, Lindsay; Tamborini, Ron
Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions Journal Article
In: Communication Methods and Measures, vol. 12, no. 2-3, pp. 119–139, 2018, ISSN: 1931-2458, 1931-2466.
@article{weber_extracting_2018,
  title     = {Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions},
  author    = {René Weber and J. Michael Mangus and Richard Huskey and Frederic R. Hopp and Ori Amir and Reid Swanson and Andrew Gordon and Peter Khooshabeh and Lindsay Hahn and Ron Tamborini},
  url       = {https://www.tandfonline.com/doi/full/10.1080/19312458.2018.1447656},
  doi       = {10.1080/19312458.2018.1447656},
  issn      = {1931-2458, 1931-2466},
  year      = {2018},
  date      = {2018-03-01},
  journal   = {Communication Methods and Measures},
  volume    = {12},
  number    = {2-3},
  pages     = {119--139},
  abstract  = {Moral Foundations Theory (MFT) and the Model of Intuitive Morality and Exemplars (MIME) contend that moral judgments are built on a universal set of basic moral intuitions. A large body of research has supported many of MFT’s and the MIME’s central hypotheses. Yet, an important prerequisite of this research—the ability to extract latent moral content represented in media stimuli with a reliable procedure—has not been systematically studied. In this article, we subject different extraction procedures to rigorous tests, underscore challenges by identifying a range of reliabilities, develop new reliability test and coding procedures employing computational methods, and provide solutions that maximize the reliability and validity of moral intuition extraction. In six content analytical studies, including a large crowd-based study, we demonstrate that: (1) traditional content analytical approaches lead to rather low reliabilities; (2) variation in coding reliabilities can be predicted by both text features and characteristics of the human coders; and (3) reliability is largely unaffected by the detail of coder training. We show that a coding task with simplified training and a coding technique that treats moral foundations as fast, spontaneous intuitions leads to acceptable inter-rater agreement, and potentially to more valid moral intuition extractions. While this study was motivated by issues related to MFT and MIME research, the methods and findings in this study have implications for extracting latent content from text narratives that go beyond moral information. Accordingly, we provide a tool for researchers interested in applying this new approach in their own work.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Filter
2024
Nurunnabi, Abdul; Teferle, Felicia; Laefer, Debra F.; Chen, Meida; Ali, Mir Masoom
Development of a Precise Tree Structure from LiDAR Point Clouds Journal Article
In: Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci., vol. XLVIII-2-2024, pp. 301–308, 2024, ISSN: 2194-9034.
Abstract | Links | BibTeX | Tags: Narrative, VGL
@article{nurunnabi_development_2024,
title = {Development of a Precise Tree Structure from {LiDAR} Point Clouds},
author = {Abdul Nurunnabi and Felicia Teferle and Debra F. Laefer and Meida Chen and Mir Masoom Ali},
url = {https://isprs-archives.copernicus.org/articles/XLVIII-2-2024/301/2024/},
doi = {10.5194/isprs-archives-XLVIII-2-2024-301-2024},
issn = {2194-9034},
year = {2024},
date = {2024-06-01},
urldate = {2024-07-11},
journal = {Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci.},
volume = {XLVIII-2-2024},
pages = {301--308},
abstract = {A precise tree structure that represents the distribution of tree stem, branches, and leaves is crucial for accurately capturing the full representation of a tree. Light Detection and Ranging (LiDAR)-based three-dimensional (3D) point clouds (PCs) capture the geometry of scanned objects including forests stands and individual trees. PCs are irregular, unstructured, often noisy, and contaminated by outliers. Researchers have struggled to develop methods to separate leaves and wood without losing the tree geometry. This paper proposes a solution that employs only the spatial coordinates (x, y, z) of the PC. The new algorithm works as a filtering approach, utilizing multi-scale neighborhood-based geometric features (GFs) e.g., linearity, planarity, and verticality to classify linear (wood) and non-linear (leaf) points. This involves finding potential wood points and coupling them with an octree-based segmentation to develop a tree architecture. The main contributions of this paper are (i) investigating the potential of different GFs to split linear and non-linear points, (ii) introducing a novel method that pointwise classifies leaf and wood points, and (iii) developing a precise 3D tree structure. The performance of the new algorithm has been demonstrated through terrestrial laser scanning PCs. For a Scots pine tree, the new method classifies leaf and wood points with an overall accuracy of 97.9%.},
keywords = {Narrative, VGL},
pubstate = {published},
tppubtype = {article}
}
Chemburkar, Ankur; Gordon, Andrew; Feng, Andrew
Evaluating Vision-Language Models on the TriangleCOPA Benchmark Journal Article
In: FLAIRS-37, vol. 37, 2024.
Abstract | BibTeX | Tags: DTIC, Narrative
@article{chemburkar_evaluating_2024,
title = {Evaluating Vision-Language Models on the {TriangleCOPA} Benchmark},
author = {Ankur Chemburkar and Andrew Gordon and Andrew Feng},
year = {2024},
date = {2024-05-01},
journal = {FLAIRS-37},
volume = {37},
abstract = {The TriangleCOPA benchmark consists of 100 textual questions with videos depicting the movements of simple shapes in the style of the classic social-psychology film created by Fritz Heider and Marianne Simmel in 1944. In our experiments, we investigate the performance of current vision-language models on this challenging benchmark, assessing the capability of these models for visual anthropomorphism and abstract interpretation.},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew S.; Feng, Andrew
Combining the Predictions of Out-of-Domain Classifiers Using Etcetera Abduction Proceedings Article
In: 2024 58th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Princeton, NJ, USA, 2024, ISBN: 9798350369298.
Links | BibTeX | Tags: DTIC, Narrative, The Narrative Group, UARC
@inproceedings{gordon_combining_2024,
title = {Combining the Predictions of Out-of-Domain Classifiers Using {Etcetera Abduction}},
author = {Andrew S. Gordon and Andrew Feng},
url = {https://ieeexplore.ieee.org/document/10480194/},
doi = {10.1109/CISS59072.2024.10480194},
isbn = {9798350369298},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
booktitle = {2024 58th Annual Conference on Information Sciences and Systems (CISS)},
pages = {1--6},
publisher = {IEEE},
address = {Princeton, NJ, USA},
keywords = {DTIC, Narrative, The Narrative Group, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Zifan; Tavakoli, Erfan Bank; Chen, Meida; You, Suya; Rao, Raghuveer; Agarwal, Sanjeev; Ren, Fengbo
TokenMotion: Motion-Guided Vision Transformer for Video Camouflaged Object Detection Via Learnable Token Selection Miscellaneous
2024, (arXiv:2311.02535 [cs]).
Abstract | Links | BibTeX | Tags: Narrative
@misc{yu_tokenmotion_2024,
title = {{TokenMotion}: Motion-Guided Vision Transformer for Video Camouflaged Object Detection Via Learnable Token Selection},
author = {Zifan Yu and Erfan Bank Tavakoli and Meida Chen and Suya You and Raghuveer Rao and Sanjeev Agarwal and Fengbo Ren},
url = {http://arxiv.org/abs/2311.02535},
eprint = {2311.02535},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-02-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {The area of Video Camouflaged Object Detection (VCOD) presents unique challenges in the field of computer vision due to texture similarities between target objects and their surroundings, as well as irregular motion patterns caused by both objects and camera movement. In this paper, we introduce TokenMotion (TMNet), which employs a transformer-based model to enhance VCOD by extracting motion-guided features using a learnable token selection. Evaluated on the challenging MoCA-Mask dataset, TMNet achieves state-of-the-art performance in VCOD. It outperforms the existing state-of-the-art method by a 12.8% improvement in weighted F-measure, an 8.4% enhancement in S-measure, and a 10.7% boost in mean IoU. The results demonstrate the benefits of utilizing motion-guided features via learnable token selection within a transformer-based framework to tackle the intricate task of VCOD.},
note = {arXiv:2311.02535 [cs]},
keywords = {Narrative},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ziming; Suen, Christine Wun Ki; Zou, Zhengbo; Chen, Meida; Shi, Yangming
Assessing Workers’ Operational Postures via Egocentric Camera Mapping Proceedings Article
In: Computing in Civil Engineering 2023, pp. 17–24, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8522-4.
Links | BibTeX | Tags: Narrative, STG
@inproceedings{liu_assessing_2024,
title = {Assessing Workers’ Operational Postures via Egocentric Camera Mapping},
author = {Ziming Liu and Christine Wun Ki Suen and Zhengbo Zou and Meida Chen and Yangming Shi},
url = {https://ascelibrary.org/doi/10.1061/9780784485224.003},
doi = {10.1061/9780784485224.003},
isbn = {978-0-7844-8522-4},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-19},
booktitle = {Computing in Civil Engineering 2023},
pages = {17--24},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Narrative, STG},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Wang, Timothy S.; Gordon, Andrew S.
Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5 Book Section
In: Holloway-Attaway, Lissa; Murray, John T. (Ed.): Interactive Storytelling, vol. 14384, pp. 297–305, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-47657-0 978-3-031-47658-7, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Narrative, UARC
@incollection{holloway-attaway_playing_2023,
title = {Playing Story Creation Games with Large Language Models: Experiments with {GPT-3.5}},
author = {Timothy S. Wang and Andrew S. Gordon},
editor = {Lissa Holloway-Attaway and John T. Murray},
url = {https://link.springer.com/10.1007/978-3-031-47658-7_28},
doi = {10.1007/978-3-031-47658-7_28},
isbn = {978-3-031-47657-0 978-3-031-47658-7},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Interactive Storytelling},
series = {Lecture Notes in Computer Science},
volume = {14384},
pages = {297--305},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC, Narrative, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Andrew S.; Feng, Andrew
Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction Proceedings Article
In: 2023 57th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Baltimore, MD, USA, 2023, ISBN: 978-1-66545-181-9.
Links | BibTeX | Tags: DTIC, Narrative, UARC
@inproceedings{gordon_searching_2023,
title = {Searching for the Most Probable Combination of Class Labels Using {Etcetera Abduction}},
author = {Andrew S. Gordon and Andrew Feng},
url = {https://ieeexplore.ieee.org/document/10089729/},
doi = {10.1109/CISS56502.2023.10089729},
isbn = {978-1-66545-181-9},
year = {2023},
date = {2023-03-01},
urldate = {2023-08-07},
booktitle = {2023 57th Annual Conference on Information Sciences and Systems (CISS)},
pages = {1--6},
publisher = {IEEE},
address = {Baltimore, MD, USA},
keywords = {DTIC, Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Chen, Meida; Hu, Qingyong; Yu, Zifan; Thomas, Hugues; Feng, Andrew; Hou, Yu; McCullough, Kyle; Ren, Fengbo; Soibelman, Lucio
STPLS3D: A Large-Scale Synthetic and Real Aerial Photogrammetry 3D Point Cloud Dataset Miscellaneous
2022, (arXiv:2203.09065 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Narrative, UARC
@misc{chen_stpls3d_2022,
title = {{STPLS3D}: A Large-Scale Synthetic and Real Aerial Photogrammetry {3D} Point Cloud Dataset},
author = {Meida Chen and Qingyong Hu and Zifan Yu and Hugues Thomas and Andrew Feng and Yu Hou and Kyle McCullough and Fengbo Ren and Lucio Soibelman},
url = {http://arxiv.org/abs/2203.09065},
eprint = {2203.09065},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2022},
date = {2022-10-01},
urldate = {2023-08-22},
publisher = {arXiv},
abstract = {Although various 3D datasets with different functions and scales have been proposed recently, it remains challenging for individuals to complete the whole pipeline of large-scale data collection, sanitization, and annotation. Moreover, the created datasets usually suffer from extremely imbalanced class distribution or partial low-quality data samples. Motivated by this, we explore the procedurally synthetic 3D data generation paradigm to equip individuals with the full capability of creating large-scale annotated photogrammetry point clouds. Specifically, we introduce a synthetic aerial photogrammetry point clouds generation pipeline that takes full advantage of open geospatial data sources and off-the-shelf commercial packages. Unlike generating synthetic data in virtual games, where the simulated data usually have limited gaming environments created by artists, the proposed pipeline simulates the reconstruction process of the real environment by following the same UAV flight pattern on different synthetic terrain shapes and building densities, which ensure similar quality, noise pattern, and diversity with real data. In addition, the precise semantic and instance annotations can be generated fully automatically, avoiding the expensive and time-consuming manual annotation. Based on the proposed pipeline, we present a richly-annotated synthetic 3D aerial photogrammetry point cloud dataset, termed STPLS3D, with more than 16 $km^2$ of landscapes and up to 18 fine-grained semantic categories. For verification purposes, we also provide a parallel dataset collected from four areas in the real environment. Extensive experiments conducted on our datasets demonstrate the effectiveness and quality of the proposed synthetic dataset.},
note = {arXiv:2203.09065 [cs]},
keywords = {DTIC, Narrative, UARC},
pubstate = {published},
tppubtype = {misc}
}
2021
Gordon, Andrew S.; Wang, Timothy S.
Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates Book Section
In: Mitchell, Alex; Vosmeer, Mirjam (Ed.): Interactive Storytelling, vol. 13138, pp. 71–79, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-92299-3 978-3-030-92300-6.
Links | BibTeX | Tags: DTIC, Narrative, UARC
@incollection{gordon_narrative_2021,
title = {Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates},
author = {Andrew S. Gordon and Timothy S. Wang},
editor = {Alex Mitchell and Mirjam Vosmeer},
url = {https://link.springer.com/10.1007/978-3-030-92300-6_7},
doi = {10.1007/978-3-030-92300-6_7},
isbn = {978-3-030-92299-3 978-3-030-92300-6},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-22},
booktitle = {Interactive Storytelling},
volume = {13138},
pages = {71--79},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {DTIC, Narrative, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2020
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@article{chen_3d_2020,
title = {{3D} Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
author = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
doi = {10.1061/(ASCE)CP.1943-5487.0000929},
issn = {0887-3801, 1943-5487},
year = {2020},
date = {2020-11-01},
journal = {Journal of Computing in Civil Engineering},
volume = {34},
number = {6},
abstract = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation, qualitative results for creating virtual environments using the segmented data are also discussed in this paper.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew S.; Miller, Rob; Morgenstern, Leora; Turán, György
Preface: Special Issue on Commonsense Reasoning, Annals of Mathematics and Artificial Intelligence Journal Article
In: Annals of Mathematics and Artificial Intelligence, 2020, ISSN: 1012-2443, 1573-7470.
Abstract | Links | BibTeX | Tags: Narrative
@article{gordon_preface_2020,
title = {Preface: Special Issue on Commonsense Reasoning, {Annals of Mathematics and Artificial Intelligence}},
author = {Andrew S. Gordon and Rob Miller and Leora Morgenstern and György Turán},
url = {http://link.springer.com/10.1007/s10472-020-09711-5},
doi = {10.1007/s10472-020-09711-5},
issn = {1012-2443, 1573-7470},
year = {2020},
date = {2020-09-01},
journal = {Annals of Mathematics and Artificial Intelligence},
abstract = {A few years after the 1956 Dartmouth Summer Workshop [1, 2], which first established artificial intelligence as a field of research, John McCarthy [3] discussed the importance of explicitly representing and reasoning with commonsense knowledge to the enterprise of creating artificially intelligent robots and agents. McCarthy proposed that commonsense knowledge was best represented using formal logic, which he viewed as a uniquely powerful lingua franca that could be used to express and reason with virtually any sort of information that humans might reason with when problem solving, a stance he further developed and propounded in [4, 5]. This approach, the formalist or logic-based approach to commonsense reasoning, was practiced by an increasing set of adherents over the next several decades [6, 7], and continues to be represented by the Commonsense Symposium Series, first held in 1991 [8] and held biennially, for the most part, after that.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew; Gordon, Andrew S.
Recognizing Multiplayer Behaviors Using Synthetic Training Data Proceedings Article
In: 2020 IEEE Conference on Games (CoG), pp. 463–470, 2020, (ISSN: 2325-4289).
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{feng_recognizing_2020,
title = {Recognizing Multiplayer Behaviors Using Synthetic Training Data},
author = {Andrew Feng and Andrew S. Gordon},
doi = {10.1109/CoG47356.2020.9231742},
issn = {2325-4289},
year = {2020},
date = {2020-08-01},
booktitle = {2020 IEEE Conference on Games (CoG)},
pages = {463--470},
abstract = {Accurate recognition of group behaviors is essential to the design of engaging networked multiplayer games. However, contemporary data-driven machine learning solutions are difficult to apply during the game development process, given that no authentic gameplay data is yet available for use as training data. In this paper, we investigate the use of synthetic training data, i.e., gameplay data that is generated by AI-controlled agent teams programmed to perform each of the behaviors to be recognized in groups of human players. The particular task we focus on is to recognize group movement formations in player-controlled avatars in a realistic virtual world. We choose five typical military team movement patterns for the formation recognition task and train machine learning models using procedurally generated unit trajectories as training data. The experiments were conducted using ResNet and EfficientNet, which are two popular convolutional neural network architectures for image classifications. The synthetic data is augmented by creating variations in image rotation, unit spacing, team size, and positional perturbations to bridge the gap between synthetic and human gameplay data. We demonstrate that high-accuracy behavior recognition can be achieved using deep neural networks by applying the aforementioned data augmentation methods to simulated gameplay data.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC),, pp. 13, ResearchGate, Orlando, FL, 2020.
Abstract | Links | BibTeX | Tags: Graphics, Narrative, STG, UARC
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
pages = {13},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with lowcost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras, and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot allow the sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segment Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
keywords = {Graphics, Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Andrew Feng and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1--4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Shree, Jaya; Liu, Emily; Gordon, Andrew; Hobbs, Jerry
Deep Natural Language Understanding of News Text Proceedings Article
In: Proceedings of the First Workshop on Narrative Understanding, pp. 19–27, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{shree_deep_2019,
title = {Deep Natural Language Understanding of News Text},
author = {Jaya Shree and Emily Liu and Andrew Gordon and Jerry Hobbs},
url = {https://www.aclweb.org/anthology/papers/W/W19/W19-2403/},
doi = {10.18653/v1/W19-2403},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the First Workshop on Narrative Understanding},
pages = {19--27},
publisher = {Association for Computational Linguistics},
address = {Minneapolis, Minnesota},
abstract = {Early proposals for the deep understanding of natural language text advocated an approach of “interpretation as abduction,” where the meaning of a text was derived as an explanation that logically entailed the input words, given a knowledge base of lexical and commonsense axioms. While most subsequent NLP research has instead pursued statistical and data-driven methods, the approach of interpretation as abduction has seen steady advancements in both theory and software implementations. In this paper, we summarize advances in deriving the logical form of the text, encoding commonsense knowledge, and technologies for scalable abductive reasoning. We then explore the application of these advancements to the deep understanding of a paragraph of news text, where the subtle meaning of words and phrases are resolved by backward chaining on a knowledge base of 80 hand-authored axioms.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Gordon, Andrew S.; Spierling, Ulrike
Playing Story Creation Games with Logical Abduction Book Section
In: Interactive Storytelling, vol. 11318, pp. 478–482, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-030-04027-7 978-3-030-04028-4.
Abstract | Links | BibTeX | Tags: Narrative
@incollection{gordon_playing_2018,
title = {Playing Story Creation Games with Logical Abduction},
author = {Andrew S. Gordon and Ulrike Spierling},
url = {http://link.springer.com/10.1007/978-3-030-04028-4_55},
doi = {10.1007/978-3-030-04028-4_55},
isbn = {978-3-030-04027-7 978-3-030-04028-4},
year = {2018},
date = {2018-11-01},
booktitle = {Interactive Storytelling},
volume = {11318},
pages = {478--482},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Story Creation Games, such as Rory’s Story Cubes and the Tell Tale card game, require players to invent creative and coherent narratives from a set of unconnected elements assembled by random chance, e.g., the throw of a die or the draw of a card. We model this human ability as a process of logical abduction, where the reasoning task is to identify a set of assumptions about a fictional world that logically entail the elements depicted on the dice or on the cards. We demonstrate the feasibility of this approach by hand-authoring a knowledge base of axioms that is sufficient to generate eight creative narratives each related to three Tell Tale cards, depicting a baseball player, a heart, and a train.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Andrew S
Interpretation of the Heider-Simmel Film Using Incremental Etcetera Abduction Proceedings Article
In: Proceedings of the 6th Annual Conference on Advances in Cognitive Systems, Cognitive Systems Foundation, Stanford, CA, 2018.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{gordon_interpretation_2018,
title = {Interpretation of the {Heider-Simmel} Film Using Incremental {Etcetera Abduction}},
author = {Andrew S. Gordon},
url = {http://www.cogsys.org/journal/volume-7},
year = {2018},
date = {2018-08-01},
booktitle = {Proceedings of the 6th Annual Conference on Advances in Cognitive Systems},
publisher = {Cognitive Systems Foundation},
address = {Stanford, CA},
abstract = {In 1944, psychologists Fritz Heider and Marianne Simmel created a short, 90-second animated film depicting two triangles and a circle moving around a box with a hinged opening, and reported how subjects viewing the film anthropomorphized the three shapes as characters with humanlike goals, emotions, and social relationships. In this paper we model this type of high-level reasoning as a process of probability-ordered logical abduction (Etcetera Abduction), where the interpretation of the film is incrementally constructed by disambiguating observed movements in the contexts of multiple running hypotheses. We describe a target interpretation and knowledge base that we used in a series of experiments to investigate the effects of varying the window size and number of running hypotheses maintained during the interpretation.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew
Linguistic Features of Helpfulness in Automated Support for Creative Writing Proceedings Article
In: Proceedings of the First Workshop on Storytelling, pp. 14–19, 2018 Association for Computational Linguistics, New Orleans, LA, 2018.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_linguistic_2018,
title = {Linguistic Features of Helpfulness in Automated Support for Creative Writing},
author = {Melissa Roemmele and Andrew Gordon},
url = {http://aclweb.org/anthology/W18-1502},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the First Workshop on Storytelling},
pages = {14--19},
publisher = {Association for Computational Linguistics},
address = {New Orleans, LA},
abstract = {We examine an emerging NLP application that supports creative writing by automatically suggesting continuing sentences in a story. The application tracks users’ modifications to generated sentences, which can be used to quantify their “helpfulness” in advancing the story. We explore the task of predicting helpfulness based on automatically detected linguistic features of the suggestions. We illustrate this analysis on a set of user interactions with the application using an initial selection of features relevant to story generation.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew
An Encoder-decoder Approach to Predicting Causal Relations in Stories Proceedings Article
In: Proceedings of the First Workshop on Storytelling, pp. 50–59, Association for Computational Linguistics, New Orleans, LA, 2018.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_encoder-decoder_2018,
title = {An Encoder-decoder Approach to Predicting Causal Relations in Stories},
author = {Melissa Roemmele and Andrew Gordon},
url = {http://aclweb.org/anthology/W18-1506},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the First Workshop on Storytelling},
pages = {50--59},
publisher = {Association for Computational Linguistics},
address = {New Orleans, LA},
abstract = {We address the task of predicting causally related events in stories according to a standard evaluation framework, the Choice of Plausible Alternatives (COPA). We present a neural encoder-decoder model that learns to predict relations between adjacent sequences in stories as a means of modeling causality. We explore this approach using different methods for extracting and representing sequence pairs as well as different model architectures. We also compare the impact of different training datasets on our model. In particular, we demonstrate the usefulness of a corpus not previously applied to COPA, the ROCStories corpus. While not state-of-the-art, our results establish a new reference point for systems evaluated on COPA, and one that is particularly informative for future neural-based approaches.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Weber, René; Mangus, J. Michael; Huskey, Richard; Hopp, Frederic R.; Amir, Ori; Swanson, Reid; Gordon, Andrew; Khooshabeh, Peter; Hahn, Lindsay; Tamborini, Ron
Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions Journal Article
In: Communication Methods and Measures, vol. 12, no. 2-3, pp. 119–139, 2018, ISSN: 1931-2458, 1931-2466.
Abstract | Links | BibTeX | Tags: ARL, DoD, Narrative
@article{weber_extracting_2018,
title = {Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions},
author = {René Weber and J. Michael Mangus and Richard Huskey and Frederic R. Hopp and Ori Amir and Reid Swanson and Andrew Gordon and Peter Khooshabeh and Lindsay Hahn and Ron Tamborini},
url = {https://www.tandfonline.com/doi/full/10.1080/19312458.2018.1447656},
doi = {10.1080/19312458.2018.1447656},
issn = {1931-2458, 1931-2466},
year = {2018},
date = {2018-03-01},
journal = {Communication Methods and Measures},
volume = {12},
number = {2-3},
pages = {119--139},
abstract = {Moral Foundations Theory (MFT) and the Model of Intuitive Morality and Exemplars (MIME) contend that moral judgments are built on a universal set of basic moral intuitions. A large body of research has supported many of MFT’s and the MIME’s central hypotheses. Yet, an important prerequisite of this research—the ability to extract latent moral content represented in media stimuli with a reliable procedure—has not been systematically studied. In this article, we subject different extraction procedures to rigorous tests, underscore challenges by identifying a range of reliabilities, develop new reliability test and coding procedures employing computational methods, and provide solutions that maximize the reliability and validity of moral intuition extraction. In six content analytical studies, including a large crowd-based study, we demonstrate that: (1) traditional content analytical approaches lead to rather low reliabilities; (2) variation in coding reliabilities can be predicted by both text features and characteristics of the human coders; and (3) reliability is largely unaffected by the detail of coder training. We show that a coding task with simplified training and a coding technique that treats moral foundations as fast, spontaneous intuitions leads to acceptable inter-rater agreement, and potentially to more valid moral intuition extractions. While this study was motivated by issues related to MFT and MIME research, the methods and findings in this study have implications for extracting latent content from text narratives that go beyond moral information. Accordingly, we provide a tool for researchers interested in applying this new approach in their own work.},
keywords = {ARL, DoD, Narrative},
pubstate = {published},
tppubtype = {article}
}
Roemmele, Melissa; Gordon, Andrew S.
Automated Assistance for Creative Writing with an RNN Language Model Proceedings Article
In: Proceedings of ACM Intelligent User Interfaces, pp. 1–2, ACM Press, Tokyo, Japan, 2018, ISBN: 978-1-4503-5571-1.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_automated_2018,
title = {Automated Assistance for Creative Writing with an {RNN} Language Model},
author = {Melissa Roemmele and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3180308.3180329},
doi = {10.1145/3180308.3180329},
isbn = {978-1-4503-5571-1},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of ACM Intelligent User Interfaces},
pages = {1--2},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {This work demonstrates an interface, Creative Help, that assists people with creative writing by automatically suggesting new sentences in a story. Authors can freely edit the generated suggestions, and the application tracks their modifications. We make use of a Recurrent Neural Network language model to generate suggestions in a simple probabilistic way. Motivated by the theorized role of unpredictability in creativity, we vary the degree of randomness in the probability distribution used to generate the sentences, and find that authors’ interactions with the suggestions are influenced by this randomness.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gonzalez, Diego; Gordon, Andrew S.
Comparing Speech and Text Input in Interactive Narratives Proceedings Article
In: Proceedings of ACM Intelligent User Interfaces, pp. 141–145, ACM Press, Tokyo, Japan, 2018, ISBN: 978-1-4503-4945-1.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{gonzalez_comparing_2018,
title = {Comparing Speech and Text Input in Interactive Narratives},
author = {Diego Gonzalez and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3172944.3172999},
doi = {10.1145/3172944.3172999},
isbn = {978-1-4503-4945-1},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of ACM Intelligent User Interfaces},
pages = {141--145},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Intelligent user interfaces are finding new applications in interactive narratives, where players take on the role of a character in a fictional storyline. A recent example is the interactive audio narrative "Traveler", in which a combination of technologies for speech recognition and unsupervised text classification allow players to navigate a branching storyline via open-vocabulary spoken input. We hypothesize that the affordances of audio-based interaction in interactive narratives are different than text-based interaction, and that these differences change the player experience and their understanding of their fictional role. To test this hypothesis, we conducted a controlled experiment (n=39) to compare player interaction in "Traveler" with a text-only variant of the same storyline. We found significant differences in the types of input provided by players, suggesting that interaction modality impacts how players conceive of their relation to narrators of fictional storylines.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Roemmele, Melissa; Gordon, Andrew S.
Lexical preferences in an automated story writing system Proceedings Article
In: Proceedings of the 31st Conference on Neural Information Processing Systems (NIPS 2017), Long Beach, CA, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_lexical_2017,
title = {Lexical preferences in an automated story writing system},
author = {Melissa Roemmele and Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/publications/NIPS-WS17},
year = {2017},
date = {2017-12-01},
booktitle = {Proceedings of the 31st Conference on Neural Information Processing Systems (NIPS 2017)},
address = {Long Beach, CA},
abstract = {The field of artificial intelligence has long envisioned the ability of computers to automatically write stories (Dehn [1981], Lebowitz [1985], Meehan [1977], Turner [1993]). For a long time, progress on this task was limited by the difficulty of encoding the vast narrative knowledge needed to produce stories with diverse content. The rise of data-driven approaches to AI introduced the opportunity to acquire this knowledge automatically from story corpora. Since then, this approach has been utilized to generate narratives for different domains and genres (Li et al. [2013], McIntyre and Lapata [2009]), which has in turn made it possible for systems to collaborate with human authors in developing stories (Khalifa et al. [2017], Manjavacas et al. [2017], Swanson and Gordon [2012]). Roemmele and Gordon [2015] introduced a web-based application called Creative Help that provides automated assistance for writing stories. The interface consists of a text box where users type “{\textbackslash}help” to automatically generate a suggestion for the next sentence in the story. One novelty of the application is that it tracks users’ modifications to the suggestions, which enables the original and modified form of a suggestion to be compared. This enables sentences generated by different models to be comparatively evaluated in terms of their influence on the story. We examined a dataset of 1182 Creative Help interactions produced by a total of 139 authors, where each interaction consists of the generated suggestion and the author’s corresponding modification. The suggestions were generated by a Recurrent Neural Network language model (RNN LM), as described in Roemmele et al. [2017], which generates sentences by iteratively sampling words according to their observed probability in a corpus. The training corpus for the model analyzed here was 8032 books (a little over half a billion words) in the BookCorpus1, which contains freely available fiction from a variety of genres. This paper briefly characterizes the generated sentences by highlighting their most prominent words and phrases and showing examples of them in context.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Cychosz, Margaret; Gordon, Andrew S.; Odimegwu, Obiageli; Connolly, Olivia; Bellassai, Jenna; Roemmele, Melissa
Effective Scenario Designs for Free-Text Interactive Fiction Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, pp. 12–23, Springer International Publishing, Funchal Madeira, Portugal, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{cychosz_effective_2017,
title = {Effective Scenario Designs for Free-Text Interactive Fiction},
author = {Margaret Cychosz and Andrew S. Gordon and Obiageli Odimegwu and Olivia Connolly and Jenna Bellassai and Melissa Roemmele},
url = {https://link.springer.com/chapter/10.1007/978-3-319-71027-3_2},
doi = {10.1007/978-3-319-71027-3_2},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
pages = {12--23},
publisher = {Springer International Publishing},
address = {Funchal Madeira, Portugal},
abstract = {Free-text interactive fiction allows players to narrate the actions of protagonists via natural language input, which are automatically directed to appropriate storyline outcomes using natural language processing techniques. We describe an authoring platform called the Data-driven Interactive Narrative Engine (DINE), which supports free-text interactive fiction by connecting player input to authored outcomes using unsupervised text classification techniques based on text corpus statistics. We hypothesize that the coherence of the interaction, as judged by the players of a DINE scenario, is dependent on specific design choices made by the author. We describe three empirical experiments with crowdsourced subjects to investigate how authoring choices impacted the coherence of the interaction, finding that scenario design and writing style can predict significant differences.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Swanson, Reid William; Gordon, Andrew S.; Khooshabeh, Peter; Sagae, Kenji; Huskey, Richard; Mangus, Michael; Amir, Ori; Weber, Rene
An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures Journal Article
In: Dialogue & Discourse, vol. 8, no. 2, pp. 105–128, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, ICB, Narrative, UARC
@article{swanson_empirical_2017,
title = {An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures},
author = {Reid William Swanson and Andrew S. Gordon and Peter Khooshabeh and Kenji Sagae and Richard Huskey and Michael Mangus and Ori Amir and Rene Weber},
url = {https://www.researchgate.net/publication/321170929_An_Empirical_Analysis_of_Subjectivity_and_Narrative_Levels_in_Personal_Weblog_Storytelling_Across_Cultures?_sg=Ck1pqxhW1uuTUe54DX5BLVYey6L6DkwTpjnes1ctAEuGQDHxoEOr887eKWjHIA0_-kk4ya9dXwEZ4OM},
doi = {10.5087/dad.2017.205},
year = {2017},
date = {2017-11-01},
journal = {Dialogue & Discourse},
volume = {8},
number = {2},
pages = {105--128},
abstract = {Storytelling is a universal activity, but the way in which discourse structure is used to persuasively convey ideas and emotions may depend on cultural factors. Because first-person accounts of life experiences can have a powerful impact in how a person is perceived, the storyteller may instinctively employ specific strategies to shape the audience’s perception. Hypothesizing that some of the differences in storytelling can be captured by the use of narrative levels and subjectivity, we analyzed over one thousand narratives taken from personal weblogs. First, we compared stories from three different cultures written in their native languages: English, Chinese and Farsi. Second, we examined the impact of these two discourse properties on a reader’s attitude and behavior toward the narrator. We found surprising similarities and differences in how stories are structured along these two dimensions across cultures. These discourse properties have a small but significant impact on a reader’s behavioral response toward the narrator.},
keywords = {ARL, DoD, ICB, Narrative, UARC},
pubstate = {published},
tppubtype = {article}
}
Treanor, Mike; Warren, Nicholas; Reed, Mason; Smith, Adam M.; Ortiz, Pablo; Carney, Laurel; Sherman, Loren; Carré, Elizabeth; Vivatvisha, Nadya; Harrell, D. Fox; Mardo, Paola; Gordon, Andrew; Dormans, Joris; Robison, Barrie; Gomez, Spencer; Heck, Samantha; Wright, Landon; Soule, Terence
Playable Experiences at AIIDE 2017 Proceedings Article
In: Proceedings of The Thirteenth AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE-17), Association for the Advancement of Artificial Intelligence, Snowbird, Utah, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{treanor_playable_2017,
title = {Playable Experiences at {AIIDE} 2017},
author = {Mike Treanor and Nicholas Warren and Mason Reed and Adam M. Smith and Pablo Ortiz and Laurel Carney and Loren Sherman and Elizabeth Carré and Nadya Vivatvisha and D. Fox Harrell and Paola Mardo and Andrew Gordon and Joris Dormans and Barrie Robison and Spencer Gomez and Samantha Heck and Landon Wright and Terence Soule},
url = {https://pdfs.semanticscholar.org/19f9/a76f6edcc6aa41bf19dba017da8c1c01e2b3.pdf},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of The Thirteenth AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE-17)},
publisher = {Association for the Advancement of Artificial Intelligence},
address = {Snowbird, Utah},
abstract = {This paper describes the accepted entries to the fifth Playable Experiences track to be held at the AIIDE conference. The Playable Experiences track showcases complete works that make use of artificial intelligence techniques as an integral part of the player experience.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Prasad, Keshav; Briet, Kayla; Odimegwu, Obiageli; Connolly, Olivia; Gonzalez, Diego; Gordon, Andrew S.
“The Long Walk” From Linear Film to Interactive Narrative Proceedings Article
In: Proceedings of the 10th International Workshop on Intelligent Narrative Technologies (INT10), AAAI, Snowbird, Utah, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{prasad_long_2017,
title = {“The Long Walk” From Linear Film to Interactive Narrative},
author = {Keshav Prasad and Kayla Briet and Obiageli Odimegwu and Olivia Connolly and Diego Gonzalez and Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/publications/INT17B},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 10th International Workshop on Intelligent Narrative Technologies (INT10)},
publisher = {AAAI},
address = {Snowbird, Utah},
abstract = {Advances in hardware and software for virtual reality and 360-degree video afford new opportunities for immersive digital storytelling, but also pose new challenges as players seek an increased sense of meaningful agency in fictional storyworlds. In this paper, we explore the interaction designs afforded by voice-controlled interactive narratives, where players speak their intended actions when prompted at choice points in branching storylines. We describe seven interaction design patterns that balance the player’s need for meaningful agency with an author’s goal to present an intended storyline. We argue that these structural designs are orthogonal to the content of a story, such that any particular story may be effectively restructured to use different patterns. By way of demonstration, we describe our efforts to remix and restructure a 360-degree film entitled The Long Walk, transforming it from a largely linear narrative with minimal interactivity into a voice-controlled interactive narrative with meaningful player agency.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bellassai, Jenna; Gordon, Andrew S.; Roemmele, Melissa; Cychosz, Margaret; Odimegwu, Obiageli; Connolly, Olivia
Unsupervised Text Classification for Natural Language Interactive Narratives Proceedings Article
In: Proceedings of the 10th International Workshop on Intelligent Narrative Technologies (INT10), AAAI, Snowbird, Utah, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{bellassai_unsupervised_2017,
title = {Unsupervised Text Classification for Natural Language Interactive Narratives},
author = {Jenna Bellassai and Andrew S. Gordon and Melissa Roemmele and Margaret Cychosz and Obiageli Odimegwu and Olivia Connolly},
url = {http://people.ict.usc.edu/~gordon/publications/INT17A},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 10th International Workshop on Intelligent Narrative Technologies (INT10)},
publisher = {AAAI},
address = {Snowbird, Utah},
abstract = {Natural language interactive narratives are a variant of traditional branching storylines where player actions are expressed in natural language rather than by selecting among choices. Previous efforts have handled the richness of natural language input using machine learning technologies for text classification, bootstrapping supervised machine learning approaches with human-in-the-loop data acquisition or by using expected player input as fake training data. This paper explores a third alternative, where unsupervised text classifiers are used to automatically route player input to the most appropriate storyline branch. We describe the Data-driven Interactive Narrative Engine (DINE), a web-based tool for authoring and deploying natural language interactive narratives. To compare the performance of different algorithms for unsupervised text classification, we collected thousands of user inputs from hundreds of crowdsourced participants playing 25 different scenarios, and hand-annotated them to create a gold-standard test set. Through comparative evaluations, we identified an unsupervised algorithm for narrative text classification that approaches the performance of supervised text classification algorithms. We discuss how this technology supports authors in the rapid creation and deployment of interactive narrative experiences, with authorial burdens similar to that of traditional branching storylines.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Hobbs, Jerry R.
A Formal Theory of Commonsense Psychology: How People Think People Think Book
Cambridge University Press, Cambridge, UK, 2017, ISBN: 978-1-108-50963-3.
Abstract | Links | BibTeX | Tags: Narrative
@book{gordon_formal_2017,
  author    = {Andrew S. Gordon and Jerry R. Hobbs},
  title     = {A Formal Theory of Commonsense Psychology: How People Think People Think},
  publisher = {Cambridge University Press},
  address   = {Cambridge, UK},
  year      = {2017},
  date      = {2017-09-01},
  isbn      = {978-1-108-50963-3},
  url       = {https://books.google.com/books?id=OEY3DwAAQBAJ&printsec=frontcover&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false},
  abstract  = {Commonsense psychology refers to the implicit theories that we all use to make sense of people's behavior in terms of their beliefs, goals, plans, and emotions. These are also the theories we employ when we anthropomorphize complex machines and computers as if they had humanlike mental lives. In order to successfully cooperate and communicate with people, these theories will need to be represented explicitly in future artificial intelligence systems. This book provides a large-scale logical formalization of commonsense psychology in support of humanlike artificial intelligence. It uses formal logic to encode the deep lexical semantics of the full breadth of psychological words and phrases, providing fourteen hundred axioms of first-order logic organized into twenty-nine commonsense psychology theories and sixteen background theories. This in-depth exploration of human commonsense reasoning for artificial intelligence researchers, linguists, and cognitive and social psychologists will serve as a foundation for the development of humanlike artificial intelligence.},
  keywords  = {Narrative},
  pubstate  = {published},
  tppubtype = {book}
}
Roemmele, Melissa; Mardo, Paola; Gordon, Andrew S.
Natural-language Interactive Narratives in Imaginal Exposure Therapy for Obsessive-Compulsive Disorder Proceedings Article
In: Proceedings of the Computational Linguistics and Clinical Psychology Workshop (CLPsych), pp. 48–57, Association for Computational Linguistics, Vancouver, Canada, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_natural-language_2017,
title = {Natural-language Interactive Narratives in Imaginal Exposure Therapy for Obsessive-Compulsive Disorder},
author = {Melissa Roemmele and Paola Mardo and Andrew S. Gordon},
url = {http://www.aclweb.org/anthology/W17-31#page=58},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the Computational Linguistics and Clinical Psychology Workshop (CLPsych)},
pages = {48--57},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Obsessive-compulsive disorder (OCD) is an anxiety-based disorder that affects around 2.5% of the population. A common treatment for OCD is exposure therapy, where the patient repeatedly confronts a feared experience, which has the long-term effect of decreasing their anxiety. Some exposures consist of reading and writing stories about an imagined anxiety-provoking scenario. In this paper, we present a technology that enables patients to interactively contribute to exposure stories by supplying natural language input (typed or spoken) that advances a scenario. This interactivity could potentially increase the patient’s sense of immersion in an exposure and contribute to its success. We introduce the NLP task behind processing inputs to predict new events in the scenario, and describe our initial approach. We then illustrate the future possibility of this work with an example of an exposure scenario authored with our application.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew S.; Swanson, Reid
Evaluating Story Generation Systems Using Automated Linguistic Analyses Proceedings Article
In: Proceedings of the SIGKDD-2017 Workshop on Machine Learning for Creativity, ACM, Halifax, Nova Scotia, Canada, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_evaluating_2017,
title = {Evaluating Story Generation Systems Using Automated Linguistic Analyses},
author = {Melissa Roemmele and Andrew S. Gordon and Reid Swanson},
url = {http://people.ict.usc.edu/~roemmele/publications/fiction_generation.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGKDD-2017 Workshop on Machine Learning for Creativity},
publisher = {ACM},
address = {Halifax, Nova Scotia, Canada},
abstract = {Story generation is a well-recognized task in computational creativity research, but one that can be difficult to evaluate empirically. It is often inefficient and costly to rely solely on human feedback for judging the quality of generated stories. We address this by examining the use of linguistic analyses for automated evaluation, using metrics from existing work on predicting writing quality. We apply these metrics specifically to story continuation, where a model is given the beginning of a story and generates the next sentence, which is useful for systems that interactively support authors' creativity in writing. We compare sentences generated by different existing models to human-authored ones according to the analyses. The results show some meaningful differences between the models, suggesting that this evaluation approach may be advantageous for future research.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Inoue, Naoya; Gordon, Andrew S.
A Scalable Weighted Max-SAT Implementation of Propositional Etcetera Abduction Proceedings Article
In: Proceedings of the 30th International Conference of the Florida AI Society (FLAIRS-30), AAAI Press, Marco Island, Florida, 2017.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{inoue_scalable_2017,
title = {A Scalable Weighted Max-SAT Implementation of Propositional Etcetera Abduction},
author = {Naoya Inoue and Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/publications/FLAIRS17.PDF},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 30th International Conference of the Florida AI Society (FLAIRS-30)},
publisher = {AAAI Press},
address = {Marco Island, Florida},
abstract = {Recent advances in technology for abductive reasoning, or inference to the best explanation, encourage the application of abduction to real-life commonsense reasoning problems. This paper describes Etcetera Abduction, a new implementation of logical abduction that is both grounded in probability theory and optimized using contemporary linear programming solvers. We present a Weighted Max-SAT formulation of Etcetera Abduction, which allows us to exploit highly advanced technologies developed in the field of SAT and Operations Research. Our experiments demonstrate the scalability of our proposal on a large-scale synthetic benchmark that contains up to ten thousand axioms, using one of the state-of-the-art mathematical optimizers developed in these fields. This is the first work to evaluate a SAT-based approach to abductive reasoning at this scale. The inference engine we developed has been made publicly available.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Solving Interpretation Problems With Etcetera Abduction Proceedings Article
In: Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems, Cognitive Systems Foundation, Troy, New York, 2017.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{gordon_solving_2017,
title = {Solving Interpretation Problems With Etcetera Abduction},
author = {Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/publications/ACS17.PDF},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems},
publisher = {Cognitive Systems Foundation},
address = {Troy, New York},
abstract = {Among the most challenging problems in Artificial Intelligence are those that require human-like abilities to make sense of ambiguous observations, to interpret events in context given a wealth of life experiences and commonsense knowledge. In the 1990s, Jerry Hobbs and colleagues demonstrated how interpretation problems can be tackled with logical abduction, a combinatorial search for the best set of assumptions that logically entails the observations. Etcetera Abduction is a new approach to ranking assumptions by reifying the uncertainty of knowledge base axioms as etcetera literals, representing conditional and prior probabilities that can be combined through logical unification. In this invited talk, I will highlight some of the features of Etcetera Abduction that make it attractive compared to alternatives, and share my perspective on the role of logic-based reasoning given current trends in machine learning research.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Kobayashi, Sosuke; Inoue, Naoya; Gordon, Andrew S.
An RNN-based Binary Classifier for the Story Cloze Test Proceedings Article
In: Proceedings of the 2nd Workshop on Linking Models of Lexical, Sentential and Discourse-level Semantics, pp. 74–80, Association for Computational Linguistics, Valencia, Spain, 2017.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_rnn-based_2017,
title = {An {RNN}-based Binary Classifier for the Story Cloze Test},
author = {Melissa Roemmele and Sosuke Kobayashi and Naoya Inoue and Andrew S. Gordon},
url = {http://www.aclweb.org/anthology/W/W17/W17-09.pdf#page=86},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the 2nd Workshop on Linking Models of Lexical, Sentential and Discourse-level Semantics},
pages = {74--80},
publisher = {Association for Computational Linguistics},
address = {Valencia, Spain},
abstract = {The Story Cloze Test consists of choosing a sentence that best completes a story given two choices. In this paper we present a system that performs this task using a supervised binary classifier on top of a recurrent neural network to predict the probability that a given story ending is correct. The classifier is trained to distinguish correct story endings given in the training data from incorrect ones that we artificially generate. Our experiments evaluate different methods for generating these negative examples, as well as different embedding-based representations of the stories. Our best result obtains 67.2% accuracy on the test set, outperforming the existing top baseline of 58.5%.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Dehghani, Morteza; Boghrati, Reihane; Man, Kingson; Hoover, Joseph; Gimbel, Sarah; Vaswani, Ashish; Zevin, Jason; Immordino, Mary Helen; Gordon, Andrew; Damasio, Antonio; Kaplan, Jonas T.
Decoding the Neural Representation of Story Meanings across Languages Journal Article
In: Human Brain Mapping, vol. 38, no. 12, 2017.
Abstract | Links | BibTeX | Tags: Narrative
@article{dehghani_decoding_2017,
  author    = {Morteza Dehghani and Reihane Boghrati and Kingson Man and Joseph Hoover and Sarah Gimbel and Ashish Vaswani and Jason Zevin and Mary Helen Immordino and Andrew Gordon and Antonio Damasio and Jonas T. Kaplan},
  title     = {Decoding the Neural Representation of Story Meanings across Languages},
  journal   = {Human Brain Mapping},
  volume    = {38},
  number    = {12},
  year      = {2017},
  date      = {2017-03-01},
  doi       = {10.1002/hbm.23814},
  url       = {https://psyarxiv.com/qrpp3/},
  abstract  = {Drawing from a common lexicon of semantic units, humans fashion narratives whose meaning transcends that of their individual utterances. However, while brain regions that represent lower-level semantic units, such as words and sentences, have been identified, questions remain about the neural representation of narrative comprehension, which involves inferring cumulative meaning. To address these questions, we exposed English, Mandarin and Farsi native speakers to native language translations of the same stories during fMRI scanning. Using a new technique in natural language processing, we calculated the distributed representations of these stories (capturing the meaning of the stories in high-dimensional semantic space), and demonstrate that using these representations we can identify the specific story a participant was reading from the neural data. Notably, this was possible even when the distributed representations were calculated using stories in a different language than the participant was reading. Relying on over 44 billion classifications, our results reveal that identification relied on a collection of brain regions most prominently located in the default mode network. These results demonstrate that neuro-semantic encoding of narratives happens at levels higher than individual semantic units and that this encoding is systematic across both individuals and languages.},
  keywords  = {Narrative},
  pubstate  = {published},
  tppubtype = {article}
}
2016
Nack, Frank; Gordon, Andrew S. (Ed.)
Interactive Storytelling Book
Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-48278-1 978-3-319-48279-8.
Abstract | Links | BibTeX | Tags: Narrative
@book{nack_interactive_2016,
  title     = {Interactive Storytelling},
  editor    = {Frank Nack and Andrew S. Gordon},
  url       = {http://link.springer.com/10.1007/978-3-319-48279-8},
  doi       = {10.1007/978-3-319-48279-8},
  isbn      = {978-3-319-48278-1, 978-3-319-48279-8},
  year      = {2016},
  date      = {2016-11-01},
  volume    = {10045},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  series    = {Lecture Notes in Computer Science},
  abstract  = {This book constitutes the refereed proceedings of the 9th International Conference on Interactive Digital Storytelling, ICIDS 2016, held in Los Angeles, CA, USA, in November 2016. The 26 revised full papers and 8 short papers presented together with 9 posters, 4 workshop, and 3 demonstration papers were carefully reviewed and selected from 88 submissions. The papers are organized in topical sections on analyses and evaluation systems; brave new ideas; intelligent narrative technologies; theoretical foundations; and usage scenarios and applications.},
  keywords  = {Narrative},
  pubstate  = {published},
  tppubtype = {book}
}
Ryan, James; Swanson, Reid
Recognizing Coherent Narrative Blog Content Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, pp. 234–246, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-48278-1 978-3-319-48279-8.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{ryan_recognizing_2016,
  title     = {Recognizing Coherent Narrative Blog Content},
  author    = {James Ryan and Reid Swanson},
  url       = {http://link.springer.com/10.1007/978-3-319-48279-8_21},
  doi       = {10.1007/978-3-319-48279-8_21},
  isbn      = {978-3-319-48278-1, 978-3-319-48279-8},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
  pages     = {234--246},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  series    = {Lecture Notes in Computer Science},
  abstract  = {Interactive storytelling applications have at their disposal massive numbers of human-authored stories, in the form of narrative weblog posts, from which story content could be harvested and repurposed. Such repurposing is currently inhibited, however, in that many blog narratives are not sufficiently coherent for use in these applications. In a narrative that is not coherent, the order of the events in the narrative is not clear given the text of the story. We present the results of a study exploring automatic methods for estimating the coherence of narrative blog posts. In the end, our simplest model—one that only considers the degree to which story text is capitalized and punctuated—vastly outperformed a baseline model and, curiously, a series of more sophisticated models. Future work may use this simple model as a baseline, or may use it along with the classifier that it extends to automatically extract large numbers of narrative blog posts from the web for purposes such as interactive storytelling.},
  keywords  = {Narrative},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ahn, Emily; Morbini, Fabrizio; Gordon, Andrew S.
Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing Proceedings Article
In: Proceedings of the 9th International Natural Language Generation Conference (INLG-2016), Edinburgh, UK, 2016.
Abstract | Links | BibTeX | Tags: Narrative, Virtual Humans
@inproceedings{ahn_improving_2016,
  title     = {Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing},
  author    = {Emily Ahn and Fabrizio Morbini and Andrew S. Gordon},
  url       = {https://www.researchgate.net/publication/307512031_Improving_Fluency_in_Narrative_Text_Generation_With_Grammatical_Transformations_and_Probabilistic_Parsing},
  year      = {2016},
  date      = {2016-09-01},
  booktitle = {Proceedings of the 9th International Natural Language Generation Conference ({INLG}-2016)},
  address   = {Edinburgh, UK},
  abstract  = {In research on automatic generation of narrative text, story events are often formally represented as a causal graph. When serializing and realizing this causal graph as natural language text, simple approaches produce cumbersome sentences with repetitive syntactic structure, e.g. long chains of “because” clauses. In our research, we show that the fluency of narrative text generated from causal graphs can be improved by applying rule-based grammatical transformations to generate many sentence variations with equivalent semantics, then selecting the variation that has the highest probability using a probabilistic syntactic parser. We evaluate our approach by generating narrative text from causal graphs that encode 100 brief stories involving the same three characters, based on a classic film of experimental social psychology. Crowdsourced workers judged the writing quality of texts generated with ranked transformations as significantly higher than those without, and not significantly lower than human-authored narratives of the same situations.},
  keywords  = {Narrative, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa; Morgens, Soja-Marie; Gordon, Andrew S.; Morency, Louis-Philippe
Recognizing Human Actions in the Motion Trajectories of Shapes Proceedings Article
In: Proceedings of ACM Intelligent User Interfaces, pp. 271–281, ACM Press, Sonoma, CA, 2016, ISBN: 978-1-4503-4137-0.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{roemmele_recognizing_2016,
  title     = {Recognizing Human Actions in the Motion Trajectories of Shapes},
  author    = {Melissa Roemmele and Soja-Marie Morgens and Andrew S. Gordon and Louis-Philippe Morency},
  url       = {http://dl.acm.org/citation.cfm?id=2856793},
  doi       = {10.1145/2856767.2856793},
  isbn      = {978-1-4503-4137-0},
  year      = {2016},
  date      = {2016-03-01},
  booktitle = {Proceedings of {ACM} Intelligent User Interfaces},
  pages     = {271--281},
  publisher = {ACM Press},
  address   = {Sonoma, CA},
  abstract  = {People naturally anthropomorphize the movement of nonliving objects, as social psychologists Fritz Heider and Marianne Simmel demonstrated in their influential 1944 research study. When they asked participants to narrate an animated film of two triangles and a circle moving in and around a box, participants described the shapes' movement in terms of human actions. Using a framework for authoring and annotating animations in the style of Heider and Simmel, we established new crowdsourced datasets where the motion trajectories of animated shapes are labeled according to the actions they depict. We applied two machine learning approaches, a spatial-temporal bag-of-words model and a recurrent neural network, to the task of automatically recognizing actions in these datasets. Our best results outperformed a majority baseline and showed similarity to human performance, which encourages further use of these datasets for modeling perception from motion trajectories. Future progress on simulating human-like motion perception will require models that integrate motion information with top-down contextual knowledge.},
  keywords  = {Narrative},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa
Writing Stories with Help from Recurrent Neural Networks Proceedings Article
In: Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, pp. 4311–4312, AAAI Press, Phoenix, AZ, 2016.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_writing_2016,
  title     = {Writing Stories with Help from Recurrent Neural Networks},
  author    = {Melissa Roemmele},
  url       = {http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/11966},
  year      = {2016},
  date      = {2016-02-01},
  booktitle = {Proceedings of the Thirtieth {AAAI} Conference on Artificial Intelligence},
  pages     = {4311--4312},
  publisher = {AAAI Press},
  address   = {Phoenix, AZ},
  abstract  = {This thesis explores the use of a recurrent neural network model for a novel story generation task. In this task, the model analyzes an ongoing story and generates a sentence that continues the story.},
  keywords  = {Narrative, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Commonsense Interpretation of Triangle Behavior Proceedings Article
In: Thirtieth AAAI Conference on Artificial Intelligence, AAAI Press, Phoenix, AZ, 2016.
Abstract | Links | BibTeX | Tags: Narrative
@inproceedings{gordon_commonsense_2016,
  author    = {Andrew S. Gordon},
  title     = {Commonsense Interpretation of Triangle Behavior},
  booktitle = {Thirtieth AAAI Conference on Artificial Intelligence},
  publisher = {AAAI Press},
  address   = {Phoenix, AZ},
  year      = {2016},
  date      = {2016-02-01},
  url       = {https://www.aaai.org/ocs/index.php/AAAI/AAAI16/rt/metadata/11790/12152},
  abstract  = {The ability to infer intentions, emotions, and other unobservable psychological states from people’s behavior is a hallmark of human social cognition, and an essential capability for future Artificial Intelligence systems. The commonsense theories of psychology and sociology necessary for such inferences have been a focus of logic-based knowledge representation research, but have been difficult to employ in robust automated reasoning architectures. In this paper we model behavior interpretation as a process of logical abduction, where the reasoning task is to identify the most probable set of assumptions that logically entail the observable behavior of others, given commonsense theories of psychology and sociology. We evaluate our approach using Triangle-COPA, a benchmark suite of 100 challenge problems based on an early social psychology experiment by Fritz Heider and Marianne Simmel. Commonsense knowledge of actions, social relationships, intentions, and emotions are encoded as defeasible axioms in first-order logic. We identify sets of assumptions that logically entail observed behaviors by backchaining with these axioms to a given depth, and order these sets by their joint probability assuming conditional independence. Our approach solves almost all (91) of the 100 questions in Triangle-COPA, and demonstrates a promising approach to robust behavior interpretation that integrates both logical and probabilistic reasoning.},
  keywords  = {Narrative},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kaplan, Jonas T.; Gimbel, Sarah I.; Dehghani, Morteza; Immordino-Yang, Mary Helen; Sagae, Kenji; Wong, Jennifer D.; Tipper, Christine M.; Damasio, Hanna; Gordon, Andrew S.; Damasio, Antonio
Processing Narratives Concerning Protected Values: A Cross-Cultural Investigation of Neural Correlates Journal Article
In: Cerebral Cortex, 2016, ISSN: 1047-3211, 1460-2199.
Abstract | Links | BibTeX | Tags: Narrative, Virtual Humans
@article{kaplan_processing_2016,
  author    = {Jonas T. Kaplan and Sarah I. Gimbel and Morteza Dehghani and Mary Helen Immordino-Yang and Kenji Sagae and Jennifer D. Wong and Christine M. Tipper and Hanna Damasio and Andrew S. Gordon and Antonio Damasio},
  title     = {Processing Narratives Concerning Protected Values: A Cross-Cultural Investigation of Neural Correlates},
  journal   = {Cerebral Cortex},
  year      = {2016},
  date      = {2016-01-01},
  issn      = {1047-3211, 1460-2199},
  doi       = {10.1093/cercor/bhv325},
  url       = {http://www.cercor.oxfordjournals.org/lookup/doi/10.1093/cercor/bhv325},
  abstract  = {Narratives are an important component of culture and play a central role in transmitting social values. Little is known, however, about how the brain of a listener/reader processes narratives. A receiver's response to narration is influenced by the narrator's framing and appeal to values. Narratives that appeal to “protected values,” including core personal, national, or religious values, may be particularly effective at influencing receivers. Protected values resist compromise and are tied with identity, affective value, moral decision-making, and other aspects of social cognition. Here, we investigated the neural mechanisms underlying reactions to protected values in narratives. During fMRI scanning, we presented 78 American, Chinese, and Iranian participants with real-life stories distilled from a corpus of over 20 million weblogs. Reading these stories engaged the posterior medial, medial prefrontal, and temporo-parietal cortices. When participants believed that the protagonist was appealing to a protected value, signal in these regions was increased compared with when no protected value was perceived, possibly reflecting the intensive and iterative search required to process this material. The effect strength also varied across groups, potentially reflecting cultural differences in the degree of concern for protected values.},
  keywords  = {Narrative, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}