Publications
Johnson, Emmanuel; Gratch, Jonathan
The Impact of Personalized Feedback on Negotiation Training Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 9, pp. 92–104, US Army Combat Capabilities Development Command–Soldier Center, 2022.
@incollection{johnson_impact_2022,
title = {The Impact of Personalized Feedback on Negotiation Training},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://adlnet.gov/assets/uploads/Vol%209_CompetencyBasedScenarioDesignBook_Complete_Final_021722v2.pdf#page=93},
year = {2022},
date = {2022-02-01},
urldate = {2022-02-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {9},
pages = {92–104},
publisher = {US Army Combat Capabilities Development Command–Soldier Center},
series = {Competency-Based Scenario Design},
abstract = {Intelligent tutoring systems (ITSs) have made great strides in teaching cognitive skills, including math (Koedinger et al., 1997; Koedinger & Corbett, 2005; Koedinger & Corbett, 2006), reading (Mills-Tettey et al., 2009; Wijekumar et al., 2005) and computer literacy (Guo, 2015; Olney et al., 2017). Recent research has begun to extend these techniques to interpersonal skills such as public speaking (Chollet et al., 2014), medical interviews (Pataki, 2012; Stevens, 2006), collaborative problem solving (Graesser et al., 2018) and negotiation (Gratch et al., 2016; Kim et al., 2009). An extensive body of research has documented the benefits of ITSs for cognitive skill development, but relative to this, research on ITSs for interpersonal skills is still in its infancy. This chapter highlights our efforts in adapting ITS techniques to teaching negotiation.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
@inproceedings{bonial_dialogue-amr_2020,
title = {Dialogue-AMR: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Proceedings Article
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association for Computational Linguistics, Florence, Italy, 2019.
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199–210},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/~nasihati/publications/HLTCEM_2019.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. To develop this system, we are creating a virtual reality simulation in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark S.; Krum, David M.
Unifying Research to Address Motion Sickness Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 1858–1859, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72811-377-7.
@inproceedings{dennison_unifying_2019,
title = {Unifying Research to Address Motion Sickness},
author = {Mark S. Dennison and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8798297/},
doi = {10.1109/VR.2019.8798297},
isbn = {978-1-72811-377-7},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {1858–1859},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gordon, Carla; Sohail, Usman; Merchant, Chirag; Jones, Andrew; Campbell, Julia; Trimmer, Matthew; Bevington, Jeffrey; Engen, COL Christopher; Traum, David
Digital Survivor of Sexual Assault Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 417–425, ACM, Marina del Rey, California, 2019, ISBN: 978-1-4503-6272-6.
@inproceedings{artstein_digital_2019,
title = {Digital Survivor of Sexual Assault},
author = {Ron Artstein and Carla Gordon and Usman Sohail and Chirag Merchant and Andrew Jones and Julia Campbell and Matthew Trimmer and Jeffrey Bevington and COL Christopher Engen and David Traum},
url = {https://doi.org/10.1145/3301275.3302303},
doi = {10.1145/3301275.3302303},
isbn = {978-1-4503-6272-6},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {417–425},
publisher = {ACM},
address = {Marina del Rey, California},
abstract = {The Digital Survivor of Sexual Assault (DS2A) is an interface that allows a user to have a conversational experience with a survivor of sexual assault, using Artificial Intelligence technology and recorded videos. The application uses a statistical classifier to retrieve contextually appropriate pre-recorded video utterances by the survivor, together with dialogue management policies which enable users to conduct simulated conversations with the survivor about the sexual assault, its aftermath, and other pertinent topics. The content in the application has been specifically elicited to support the needs for the training of U.S. Army professionals in the Sexual Harassment/Assault Response and Prevention (SHARP) Program, and the application comes with an instructional support package. The system has been tested with approximately 200 users, and is presently being used in the SHARP Academy's capstone course.},
keywords = {DoD, Graphics, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Human Cooperation When Acting Through Autonomous Machines Journal Article
In: Proceedings of the National Academy of Sciences, vol. 116, no. 9, pp. 3482–3487, 2019, ISSN: 0027-8424, 1091-6490.
@article{de_melo_human_2019,
title = {Human Cooperation When Acting Through Autonomous Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1817656116},
doi = {10.1073/pnas.1817656116},
issn = {0027-8424, 1091-6490},
year = {2019},
date = {2019-02-01},
journal = {Proceedings of the National Academy of Sciences},
volume = {116},
number = {9},
pages = {3482–3487},
abstract = {Recent times have seen an emergence of intelligent machines that act autonomously on our behalf, such as autonomous vehicles. Despite promises of increased efficiency, it is not clear whether this paradigm shift will change how we decide when our self-interest (e.g., comfort) is pitted against the collective interest (e.g., environment). Here we show that acting through machines changes the way people solve these social dilemmas and we present experimental evidence showing that participants program their autonomous vehicles to act more cooperatively than if they were driving themselves. We show that this happens because programming causes selfish short-term rewards to become less salient, leading to considerations of broader societal goals. We also show that the programmed behavior is influenced by past experience. Finally, we report evidence that the effect generalizes beyond the domain of autonomous vehicles. We discuss implications for designing autonomous machines that contribute to a more cooperative society.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chu, Veronica C.; Lucas, Gale M.; Lei, Su; Mozgai, Sharon; Khooshabeh, Peter; Gratch, Jonathan
Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat Journal Article
In: Frontiers in Human Neuroscience, vol. 13, 2019, ISSN: 1662-5161.
@article{chu_emotion_2019,
title = {Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat},
author = {Veronica C. Chu and Gale M. Lucas and Su Lei and Sharon Mozgai and Peter Khooshabeh and Jonathan Gratch},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2019.00050/full},
doi = {10.3389/fnhum.2019.00050},
issn = {1662-5161},
year = {2019},
date = {2019-02-01},
journal = {Frontiers in Human Neuroscience},
volume = {13},
abstract = {The current study examines cooperation and cardiovascular responses in individuals that were defected on by their opponent in the first round of an iterated Prisoner’s Dilemma. In this scenario, participants were either primed with the emotion regulation strategy of reappraisal or no emotion regulation strategy, and their opponent either expressed an amused smile or a polite smile after the results were presented. We found that cooperation behavior decreased in the no emotion regulation group when the opponent expressed an amused smile compared to a polite smile. In the cardiovascular measures, we found significant differences between the emotion regulation conditions using the biopsychosocial (BPS) model of challenge and threat. However, the cardiovascular measures of participants instructed with the reappraisal strategy were only weakly comparable with a threat state of the BPS model, which involves decreased blood flow and perception of greater task demands than resources to cope with those demands. Conversely, the cardiovascular measures of participants without an emotion regulation strategy were only weakly comparable with a challenge state of the BPS model, which involves increased blood flow and perception of having enough or more resources to cope with task demands.},
keywords = {ARL, DoD, MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Fleming, Steven D; O’Banion, Matt S; McAlinden, Ryan; Oxendine, Christopher; Wright, William; Irmischer, Ian
Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations Journal Article
In: Annual Report (State and Future of GEOINT), pp. 5, 2019.
@article{fleming_rapid_2019,
title = {Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations},
author = {Steven D Fleming and Matt S O’Banion and Ryan McAlinden and Christopher Oxendine and William Wright and Ian Irmischer},
url = {http://trajectorymagazine.com/rapid-terrain-generation/},
year = {2019},
date = {2019-01-01},
journal = {Annual Report (State and Future of GEOINT)},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {DoD, Simulation, STG},
pubstate = {published},
tppubtype = {article}
}
Marge, Matthew; Bonial, Claire; Lukin, Stephanie M.; Hayes, Cory J.; Foots, Ashley; Artstein, Ron; Henry, Cassidy; Pollard, Kimberly A.; Gordon, Carla; Gervits, Felix; Leuski, Anton; Hill, Susan G.; Voss, Clare R.; Traum, David
Balancing Efficiency and Coverage in Human-Robot Dialogue Collection Proceedings Article
In: Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction, arXiv, Arlington, Virginia, 2018.
@inproceedings{marge_balancing_2018,
title = {Balancing Efficiency and Coverage in Human-Robot Dialogue Collection},
author = {Matthew Marge and Claire Bonial and Stephanie M. Lukin and Cory J. Hayes and Ashley Foots and Ron Artstein and Cassidy Henry and Kimberly A. Pollard and Carla Gordon and Felix Gervits and Anton Leuski and Susan G. Hill and Clare R. Voss and David Traum},
url = {https://arxiv.org/abs/1810.02017},
year = {2018},
date = {2018-10-01},
booktitle = {Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction},
publisher = {arXiv},
address = {Arlington, Virginia},
abstract = {We describe a multi-phased Wizard-of-Oz approach to collecting human-robot dialogue in a collaborative search and navigation task. The data is being used to train an initial automated robot dialogue system to support collaborative exploration tasks. In the first phase, a wizard freely typed robot utterances to human participants. For the second phase, this data was used to design a GUI that includes buttons for the most common communications, and templates for communications with varying parameters. Comparison of the data gathered in these phases shows that the GUI enabled a faster pace of dialogue while still maintaining high coverage of suitable responses, enabling more efficient targeted data collection, and improvements in natural language understanding using GUI-collected data. As a promising first step towards interactive learning, this work shows that our approach enables the collection of useful training data for navigation-based HRI tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Goldberg, Benjamin; Nye, Benjamin; Lane, H Chad; Guadagnoli, Mark
Team Assessment and Pedagogy as Informed by Sports Coaching and Assessment Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 6-Team Modeling, pp. 105–119, US Army Research Laboratory (ARL), Orlando, Florida, 2018, ISBN: 978-0-9977257-4-2.
@incollection{goldberg_team_2018,
title = {Team Assessment and Pedagogy as Informed by Sports Coaching and Assessment},
author = {Benjamin Goldberg and Benjamin Nye and H Chad Lane and Mark Guadagnoli},
url = {https://gifttutoring.org/attachments/download/3029/Design%20Recommendations%20for%20ITS_Volume%206%20-%20Team%20Tutoring_final.pdf},
isbn = {978-0-9977257-4-2},
year = {2018},
date = {2018-08-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 6-Team Modeling},
pages = {105–119},
publisher = {US Army Research Laboratory (ARL)},
address = {Orlando, Florida},
abstract = {In this chapter, we consider pedagogical insights offered by three different sources of information from sports coaching and assessment: published reports of sports training, first-hand accounts of team training, and a review of assessment approaches for measuring team performance. These issues are considered in the context of an integrated taxonomy of feedback that considers when feedback was given, who it was given to (e.g., individual vs. team), the type of feedback (e.g., positive vs. negative), and the specificity of feedback (e.g., detailed issues vs. brief note). The goal of this work is to consider how these patterns might generalize to a wider range of learning tasks, to improve both learning and assessment of team performance.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
de Melo, Celso M.; Khooshabeh, Peter; Amir, Ori; Gratch, Jonathan
Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 2224–2226, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
@inproceedings{de_melo_shaping_2018,
title = {Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing},
author = {Celso M. de Melo and Peter Khooshabeh and Ori Amir and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3238129},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {2224–2226},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Emotion expressions can help solve social dilemmas where individual interest is pitted against the collective interest. Building on research that shows that emotions communicate intentions to others, we reinforce that people can infer whether emotionally expressive computer agents intend to cooperate or compete. We further show important distinctions between computer agents that are perceived to be driven by humans (i.e., avatars) vs. by algorithms (i.e., agents). Our results reveal that, when the emotion expression reflects an intention to cooperate, participants will cooperate more with avatars than with agents; however, when the emotion reflects an intention to compete, participants cooperate just as little with avatars as with agents. Finally, we present first evidence that the way the dilemma is described - or framed - can influence people's decision-making. We discuss implications for the design of autonomous agents that foster cooperation with humans, beyond what game theory predicts in social dilemmas.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1495–1503, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
@inproceedings{pynadath_clustering_2018,
title = {Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://dl.acm.org/citation.cfm?id=3237923},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1495–1503},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Trust is critical to the success of human-agent teams, and a critical antecedent to trust is transparency. To best interact with human teammates, an agent must explain itself so that they understand its decision-making process. However, individual differences among human teammates require that the agent dynamically adjust its explanation strategy based on their unobservable subjective beliefs. The agent must therefore recognize its teammates' subjective beliefs relevant to trust-building (e.g., their understanding of the agent's capabilities and process). We leverage a nonparametric method to enable an agent to use its history of prior interactions as a means for recognizing and predicting a new teammate's subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable perceptions. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors' responses to infer the likelihood of possible beliefs, as in collaborative filtering. The results provide insights into the types of beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Marge, Matthew; Henry, Cassidy; Artstein, Ron; Traum, David; Voss, Clare R.
Consequences and Factors of Stylistic Differences in Human-Robot Dialogue Proceedings Article
In: Proceedings of the SIGDIAL 2018 Conference, pp. 110–118, Association for Computational Linguistics, Melbourne, Australia, 2018.
@inproceedings{lukin_consequences_2018,
title = {Consequences and Factors of Stylistic Differences in Human-Robot Dialogue},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Matthew Marge and Cassidy Henry and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/papers/W/W18/W18-5012/},
doi = {10.18653/v1/W18-5012},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the SIGDIAL 2018 Conference},
pages = {110–118},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {This paper identifies stylistic differences in instruction-giving observed in a corpus of human-robot dialogue. Differences in verbosity and structure (i.e., single-intent vs. multi-intent instructions) arose naturally without restrictions or prior guidance on how users should speak with the robot. Different styles were found to produce different rates of miscommunication, and correlations were found between style differences and individual user variation, trust, and interaction experience with the robot. Understanding potential consequences and factors that influence style can inform design of dialogue systems that are robust to natural variation from human users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Schwartz, David; Goldberg, Stephen L.
An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger Proceedings Article
In: Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE), pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@inproceedings{wang_analysis_2018,
title = {An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and David Schwartz and Stephen L. Goldberg},
url = {http://ceur-ws.org/Vol-2141/paper3.pdf},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE)},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Using a low-cost and high-speed computer graphics and character animation technology, we created digital doppelgangers of students and placed them in a learning-by-explaining task where they interacted with digital doppelgangers of themselves. We investigate the research question of how increasing the similarity of the physical appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual human listener in a learning-by-explaining paradigm. It presents an analysis of how students’ perceptions of the resemblance impact their learning experience and outcomes. The analysis and results offer insight into the promise and limitation of the application of this novel technology to pedagogical agents research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Barnes, Michael J.; Wang, Ning; Chen, Jessie Y. C.
Transparency Communication for Machine Learning in Human-Automation Interaction Book Section
In: Human and Machine Learning, pp. 75–90, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-90402-3 978-3-319-90403-0.
@incollection{pynadath_transparency_2018,
title = {Transparency Communication for Machine Learning in Human-Automation Interaction},
author = {David V. Pynadath and Michael J. Barnes and Ning Wang and Jessie Y. C. Chen},
url = {http://link.springer.com/10.1007/978-3-319-90403-0_5},
doi = {10.1007/978-3-319-90403-0_5},
isbn = {978-3-319-90402-3 978-3-319-90403-0},
year = {2018},
date = {2018-06-01},
booktitle = {Human and Machine Learning},
pages = {75–90},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Technological advances offer the promise of autonomous systems to form human-machine teams that are more capable than their individual members. Understanding the inner workings of the autonomous systems, especially as machine-learning (ML) methods are being widely applied to the design of such systems, has become increasingly challenging for the humans working with them. The “black-box” nature of quantitative ML approaches poses an impediment to people’s situation awareness (SA) of these ML-based systems, often resulting in either disuse or over-reliance of autonomous systems employing such algorithms. Research in human-automation interaction has shown that transparency communication can improve teammates’ SA, foster the trust relationship, and boost the human-automation team’s performance. In this chapter, we will examine the implications of an agent transparency model for human interactions with ML-based agents using automated explanations. We will discuss the application of a particular ML method, reinforcement learning (RL), in Partially Observable Markov Decision Process (POMDP)-based agents, and the design of explanation algorithms for RL in POMDPs.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction Proceedings Article
In: Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR), Association for the Advancement of Artificial Intelligence, London, UK, 2018.
@inproceedings{pynadath_nearest-neighbor_2018,
title = {A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://aied2018.utscic.edu.au/proceedings/},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR)},
publisher = {Association for the Advancement of Artificial Intelligence},
address = {London, UK},
abstract = {Trust is critical to the success of human-robot interaction (HRI), and one of the critical antecedents to trust is transparency. To best interact with human teammates, a robot must be able to ensure that they understand its decision-making process. Recent work has developed automated explanation methods that can achieve this goal. However, individual differences among human teammates require that the robot dynamically adjust its explanation strategy based on their unobservable subjective beliefs. We therefore need methods by which a robot can recognize its teammates’ subjective beliefs relevant to trust-building (e.g., their understanding of the robot’s capabilities and process). We leverage a nonparametric method, common across many fields of artificial intelligence, to enable a robot to use its history of prior interactions as a means for recognizing and predicting a new teammate’s subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable subjective beliefs. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors to infer the likelihood of possible subjective beliefs, and the results provide insights into the types of subjective beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen
Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing Proceedings Article
In: Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018, pp. 17–22, ACM Press, Beijing, China, 2018, ISBN: 978-1-4503-6376-1.
@inproceedings{kang_socio-cultural_2018,
title = {Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang},
url = {http://dl.acm.org/citation.cfm?doid=3205326.3205348},
doi = {10.1145/3205326.3205348},
isbn = {978-1-4503-6376-1},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018},
pages = {17–22},
publisher = {ACM Press},
address = {Beijing, China},
abstract = {We explored how users perceive virtual characters that performed the role of a counseling interviewer, while presenting different levels of social class, as well as single or multi-tasking behavior. To investigate this subject, we designed a 2x2 experiment (tasking type and social class of the virtual counseling interviewer). In the experiment, participants experienced the counseling interview interactions over video conferencing on a smartphone. We measured user responses to and perceptions of the virtual human interviewer. The results demonstrate that the tasking types and social class of the virtual counselor affected user responses to and perceptions of the virtual counselor. The results offer insight into the design and development of effective, realistic, and believable virtual human counselors. Furthermore, the results also address current social questions about how smartphones might mediate social interactions, including human-agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Merchant, Chirag; Schwartz, David; Goldberg, Stephen L.
Learning by Explaining to a Digital Doppelganger Book Section
In: Intelligent Tutoring Systems, vol. 10858, pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@incollection{wang_learning_2018,
title = {Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and Chirag Merchant and David Schwartz and Stephen L. Goldberg},
url = {http://link.springer.com/10.1007/978-3-319-91464-0_25},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-05-01},
booktitle = {Intelligent Tutoring Systems},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. An emerging computer animation technology makes the creation of digital doppelgangers an accessible reality. This allows researchers in pedagogical agents to explore previously unexplorable research questions, such as how does increasing the similarity in appearance between the agent and the student impact learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual listener in a learning-by-explaining paradigm. Results offer insight into the promise and limitation of this novel technology.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Filter
2022
Johnson, Emmanuel; Gratch, Jonathan
The Impact of Personalized Feedback on Negotiation Training Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. Volume 9, pp. 92–104, US Army Combat Capabilities Development Command–Soldier Center, 2022.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@incollection{johnson_impact_2022,
title = {The Impact of Personalized Feedback on Negotiation Training},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://adlnet.gov/assets/uploads/Vol%209_CompetencyBasedScenarioDesignBook_Complete_Final_021722v2.pdf#page=93},
year = {2022},
date = {2022-02-01},
urldate = {2022-02-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {Volume 9},
pages = {92–104},
publisher = {US Army Combat Capabilities Development Command–Soldier Center},
series = {Competency, Based Scenario Design},
abstract = {Intelligent tutoring systems (ITSs) have made great strides in teaching cognitive skills, including math (Koedinger et al., 1997; Koedinger & Corbett, 2005; Koedinger & Corbett, 2006), reading (Mills-Tettey, et al., 2009; Wijekumar et al., 2005;) and computer literacy (Guo, 2015; Olney et al., 2017;). Recent research has begun to extend these techniques to interpersonal skills such as public speaking (Chollet et al., 2014), medical interviews (Pataki, 2012; Stevens, 2006), collaborative problem solving (Graesser et al., 2018) and negotiation (Gratch et al., 2016; Kim et al., 2009). An extensive body of research has documented the benefits of ITSs for cognitive skill development, but relative to this, research on ITSs for interpersonal skills is still in its infancy. This chapter highlights our efforts in adapting ITS techniques to teaching negotiation.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2020
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARL, ARO-Coop, DoD, UARC, Virtual Humans
@inproceedings{bonial_dialogue-amr_2020,
title = {Dialogue-AMR: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Proceedings Article
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association of Computational Linguistics, Florence, Italy, 2019.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199–210},
publisher = {Association of Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presentedhere,thoughtask-specific,isextendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/ nasihati/publications/HLTCEM_2019.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. In order to develop this system, we are creating a virtual reality simulation, in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark S.; Krum, David M.
Unifying Research to Address Motion Sickness Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 1858–1859, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72811-377-7.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC
@inproceedings{dennison_unifying_2019,
title = {Unifying Research to Address Motion Sickness},
author = {Mark S. Dennison and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8798297/},
doi = {10.1109/VR.2019.8798297},
isbn = {978-1-72811-377-7},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {1858–1859},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gordon, Carla; Sohail, Usman; Merchant, Chirag; Jones, Andrew; Campbell, Julia; Trimmer, Matthew; Bevington, Jeffrey; Engen, COL Christopher; Traum, David
Digital Survivor of Sexual Assault Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 417–425, ACM, Marina del Rey, California, 2019, ISBN: 978-1-4503-6272-6.
Abstract | Links | BibTeX | Tags: DoD, Graphics, MedVR, UARC, Virtual Humans
@inproceedings{artstein_digital_2019,
title = {Digital Survivor of Sexual Assault},
author = {Ron Artstein and Carla Gordon and Usman Sohail and Chirag Merchant and Andrew Jones and Julia Campbell and Matthew Trimmer and Jeffrey Bevington and COL Christopher Engen and David Traum},
url = {https://doi.org/10.1145/3301275.3302303},
doi = {10.1145/3301275.3302303},
isbn = {978-1-4503-6272-6},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {417–425},
publisher = {ACM},
address = {Marina del Rey, California},
abstract = {The Digital Survivor of Sexual Assault (DS2A) is an interface that allows a user to have a conversational experience with a survivor of sexual assault, using Artificial Intelligence technology and recorded videos. The application uses a statistical classifier to retrieve contextually appropriate pre-recorded video utterances by the survivor, together with dialogue management policies which enable users to conduct simulated conversations with the survivor about the sexual assault, its aftermath, and other pertinent topics. The content in the application has been specifically elicited to support the needs for the training of U.S. Army professionals in the Sexual Harassment/Assault Response and Prevention (SHARP) Program, and the application comes with an instructional support package. The system has been tested with approximately 200 users, and is presently being used in the SHARP Academy's capstone course.},
keywords = {DoD, Graphics, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Human Cooperation When Acting Through Autonomous Machines Journal Article
In: Proceedings of the National Academy of Sciences, vol. 116, no. 9, pp. 3482–3487, 2019, ISSN: 0027-8424, 1091-6490.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{de_melo_human_2019,
title = {Human Cooperation When Acting Through Autonomous Machines},
author = {Celso M. Melo and Stacy Marsella and Jonathan Gratch},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1817656116},
doi = {10.1073/pnas.1817656116},
issn = {0027-8424, 1091-6490},
year = {2019},
date = {2019-02-01},
journal = {Proceedings of the National Academy of Sciences},
volume = {116},
number = {9},
pages = {3482–3487},
abstract = {Recent times have seen an emergence of intelligent machines that act autonomously on our behalf, such as autonomous vehicles. Despite promises of increased efficiency, it is not clear whether this paradigm shift will change how we decide when our self-interest (e.g., comfort) is pitted against the collective interest (e.g., environment). Here we show that acting through machines changes the way people solve these social dilemmas and we present experimental evidence showing that participants program their autonomous vehicles to act more cooperatively than if they were driving themselves. We show that this happens because programming causes selfish short-term rewards to become less salient, leading to considerations of broader societal goals. We also show that the programmed behavior is influenced by past experience. Finally, we report evidence that the effect generalizes beyond the domain of autonomous vehicles. We discuss implications for designing autonomous machines that contribute to a more cooperative society},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chu, Veronica C.; Lucas, Gale M.; Lei, Su; Mozgai, Sharon; Khooshabeh, Peter; Gratch, Jonathan
Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat Journal Article
In: Frontiers in Human Neuroscience, vol. 13, 2019, ISSN: 1662-5161.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, UARC, VHTL, Virtual Humans
@article{chu_emotion_2019,
title = {Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat},
author = {Veronica C. Chu and Gale M. Lucas and Su Lei and Sharon Mozgai and Peter Khooshabeh and Jonathan Gratch},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2019.00050/full},
doi = {10.3389/fnhum.2019.00050},
issn = {1662-5161},
year = {2019},
date = {2019-02-01},
journal = {Frontiers in Human Neuroscience},
volume = {13},
abstract = {The current study examines cooperation and cardiovascular responses in individuals that were defected on by their opponent in the first round of an iterated Prisoner’s Dilemma. In this scenario, participants were either primed with the emotion regulation strategy of reappraisal or no emotion regulation strategy, and their opponent either expressed an amused smile or a polite smile after the results were presented. We found that cooperation behavior decreased in the no emotion regulation group when the opponent expressed an amused smile compared to a polite smile. In the cardiovascular measures, we found significant differences between the emotion regulation conditions using the biopsychosocial (BPS) model of challenge and threat. However, the cardiovascular measures of participants instructed with the reappraisal strategy were only weakly comparable with a threat state of the BPS model, which involves decreased blood flow and perception of greater task demands than resources to cope with those demands. Conversely, the cardiovascular measures of participants without an emotion regulation strategy were only weakly comparable with a challenge state of the BPS model, which involves increased blood flow and perception of having enough or more resources to cope with task demands.},
keywords = {ARL, DoD, MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Fleming, Steven D; O’Banion, Matt S; McAlinden, Ryan; Oxendine, Christopher; Wright, William; Irmischer, Ian
Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations Journal Article
In: Annual Report (State and Future of GEOINT), pp. 5, 2019.
Abstract | Links | BibTeX | Tags: DoD, Simulation, STG
@article{fleming_rapid_2019,
title = {Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations},
author = {Steven D Fleming and Matt S O’Banion and Ryan McAlinden and Christopher Oxendine and William Wright and Ian Irmischer},
url = {http://trajectorymagazine.com/rapid-terrain-generation/},
year = {2019},
date = {2019-01-01},
journal = {Annual Report (State and Future of GEOINT)},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, and combatant and combat support organizations. In this way, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {DoD, Simulation, STG},
pubstate = {published},
tppubtype = {article}
}
2018
Marge, Matthew; Bonial, Claire; Lukin, Stephanie M.; Hayes, Cory J.; Foots, Ashley; Artstein, Ron; Henry, Cassidy; Pollard, Kimberly A.; Gordon, Carla; Gervits, Felix; Leuski, Anton; Hill, Susan G.; Voss, Clare R.; Traum, David
Balancing Efficiency and Coverage in Human-Robot Dialogue Collection Proceedings Article
In: Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction, arXiv, Arlington, Virginia, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{marge_balancing_2018,
title = {Balancing Efficiency and Coverage in Human-Robot Dialogue Collection},
author = {Matthew Marge and Claire Bonial and Stephanie M. Lukin and Cory J. Hayes and Ashley Foots and Ron Artstein and Cassidy Henry and Kimberly A. Pollard and Carla Gordon and Felix Gervits and Anton Leuski and Susan G. Hill and Clare R. Voss and David Traum},
url = {https://arxiv.org/abs/1810.02017},
year = {2018},
date = {2018-10-01},
booktitle = {Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction},
publisher = {arXiv},
address = {Arlington, Virginia},
abstract = {We describe a multi-phased Wizard-of-Oz approach to collecting human-robot dialogue in a collaborative search and navigation task. The data is being used to train an initial automated robot dialogue system to support collaborative exploration tasks. In the first phase, a wizard freely typed robot utterances to human participants. For the second phase, this data was used to design a GUI that includes buttons for the most common communications, and templates for communications with varying parameters. Comparison of the data gathered in these phases shows that the GUI enabled a faster pace of dialogue while still maintaining high coverage of suitable responses, enabling more efficient targeted data collection, and improvements in natural language understanding using GUI-collected data. As a promising first step towards interactive learning, this work shows that our approach enables the collection of useful training data for navigation-based HRI tasks.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Goldberg, Benjamin; Nye, Benjamin; Lane, H Chad; Guadagnoli, Mark
Team Assessment and Pedagogy as Informed by Sports Coaching and Assessment Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 6-Team Modeling, pp. 105–119, US Army Research Laboratory (ARL), Orlando, Florida, 2018, ISBN: 978-0-9977257-4-2.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences, UARC
@incollection{goldberg_team_2018,
title = {Team Assessment and Pedagogy as Informed by Sports Coaching and Assessment},
author = {Benjamin Goldberg and Benjamin Nye and H Chad Lane and Mark Guadagnoli},
url = {https://gifttutoring.org/attachments/download/3029/Design%20Recommendations%20for%20ITS_Volume%206%20-%20Team%20Tutoring_final.pdf},
isbn = {978-0-9977257-4-2},
year = {2018},
date = {2018-08-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 6-Team Modeling},
pages = {105–119},
publisher = {US Army Research Laboratory (ARL)},
address = {Orlando, Florida},
abstract = {In this chapter, we consider pedagogical insights offered by three different sources of information from sports coaching and assessment: published reports of sports training, first-hand accounts of team training, and a review of assessment approaches for measuring team performance. These issues are considered in the context of an integrated taxonomy of feedback that considers when feedback was given, who it was given to (e.g., individual vs. team), the type of feedback (e.g., positive vs. negative), and the specificity of feedback (e.g., detailed issues vs. brief note). The goal of this work is to consider how these patterns might generalize to a wider range of learning tasks, to improve both learning and assessment of team performance.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Melo, Celso M.; Khooshabeh, Peter; Amir, Ori; Gratch, Jonathan
Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 2224–2226, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{de_melo_shaping_2018,
title = {Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing},
author = {Celso M. Melo and Peter Khooshabeh and Ori Amir and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3238129},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {2224–2226},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Emotion expressions can help solve social dilemmas where individual interest is pitted against the collective interest. Building on research that shows that emotions communicate intentions to others, we reinforce that people can infer whether emotionally expressive computer agents intend to cooperate or compete. We further show important distinctions between computer agents that are perceived to be driven by humans (i.e., avatars) vs. by algorithms (i.e., agents). Our results reveal that, when the emotion expression reflects an intention to cooperate, participants will cooperate more with avatars than with agents; however, when the emotion reflects an intention to compete, participants cooperate just as little with avatars as with agents. Finally, we present first evidence that the way the dilemma is described - or framed - can influence people's decision-making. We discuss implications for the design of autonomous agents that foster cooperation with humans, beyond what game theory predicts in social dilemmas.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1495–1503, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{pynadath_clustering_2018,
title = {Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://dl.acm.org/citation.cfm?id=3237923},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1495–1503},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Trust is critical to the success of human-agent teams, and a critical antecedent to trust is transparency. To best interact with human teammates, an agent must explain itself so that they understand its decision-making process. However, individual differences among human teammates require that the agent dynamically adjust its explanation strategy based on their unobservable subjective beliefs. The agent must therefore recognize its teammates' subjective beliefs relevant to trust-building (e.g., their understanding of the agent's capabilities and process). We leverage a nonparametric method to enable an agent to use its history of prior interactions as a means for recognizing and predicting a new teammate's subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable perceptions. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors' responses to infer the likelihood of possible beliefs, as in collaborative filtering. The results provide insights into the types of beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
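The nearest-neighbor inference in this abstract can be made concrete with a short sketch, assuming each teammate's behavior has already been encoded as a fixed-length feature vector. The features, numbers, and choice of k below are invented for illustration.

# Sketch of kNN-style belief inference from behavioral features (assumed data).
import numpy as np

# Rows: prior teammates; columns: behavioral features (e.g., compliance rate,
# mean response latency in seconds).
behavior = np.array([[0.9, 1.2], [0.4, 3.0], [0.8, 1.5], [0.2, 4.1]])
# Surveyed trust in the agent for those same teammates, on a 0-1 scale.
beliefs = np.array([0.85, 0.30, 0.75, 0.20])

def infer_belief(new_behavior, k=2):
    # Average the surveyed beliefs of the k most behaviorally similar prior
    # teammates, as in collaborative filtering.
    dists = np.linalg.norm(behavior - new_behavior, axis=1)
    neighbors = np.argsort(dists)[:k]
    return beliefs[neighbors].mean()

print(infer_belief(np.array([0.85, 1.3])))  # resembles the high-trust teammates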
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Marge, Matthew; Henry, Cassidy; Artstein, Ron; Traum, David; Voss, Clare R.
Consequences and Factors of Stylistic Differences in Human-Robot Dialogue Proceedings Article
In: Proceedings of the SIGDIAL 2018 Conference, pp. 110–118, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{lukin_consequences_2018,
title = {Consequences and Factors of Stylistic Differences in Human-Robot Dialogue},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Matthew Marge and Cassidy Henry and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/papers/W/W18/W18-5012/},
doi = {10.18653/v1/W18-5012},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the SIGDIAL 2018 Conference},
pages = {110–118},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {This paper identifies stylistic differences in instruction-giving observed in a corpus of human-robot dialogue. Differences in verbosity and structure (i.e., single-intent vs. multi-intent instructions) arose naturally without restrictions or prior guidance on how users should speak with the robot. Different styles were found to produce different rates of miscommunication, and correlations were found between style differences and individual user variation, trust, and interaction experience with the robot. Understanding potential consequences and factors that influence style can inform design of dialogue systems that are robust to natural variation from human users.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Schwartz, David; Goldberg, Stephen L.
An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger Proceedings Article
In: Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE), pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC, Virtual Humans
@inproceedings{wang_analysis_2018,
title = {An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and David Schwartz and Stephen L. Goldberg},
url = {http://ceur-ws.org/Vol-2141/paper3.pdf},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE)},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Using a low-cost and high-speed computer graphics and character animation technology, we created digital doppelgangers of students and placed them in a learning-by-explaining task where they interacted with digital doppelgangers of themselves. We investigate the research question of how increasing the similarity of physical appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual human listener in a learning-by-explaining paradigm. It presents an analysis of how students’ perceptions of the resemblance impact their learning experience and outcomes. The analysis and results offer insight into the promise and limitation of the application of this novel technology to pedagogical agents research.},
keywords = {ARL, DoD, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Barnes, Michael J.; Wang, Ning; Chen, Jessie Y. C.
Transparency Communication for Machine Learning in Human-Automation Interaction Book Section
In: Human and Machine Learning, pp. 75–90, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-90402-3 978-3-319-90403-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@incollection{pynadath_transparency_2018,
title = {Transparency Communication for Machine Learning in Human-Automation Interaction},
author = {David V. Pynadath and Michael J. Barnes and Ning Wang and Jessie Y. C. Chen},
url = {http://link.springer.com/10.1007/978-3-319-90403-0_5},
doi = {10.1007/978-3-319-90403-0_5},
isbn = {978-3-319-90402-3 978-3-319-90403-0},
year = {2018},
date = {2018-06-01},
booktitle = {Human and Machine Learning},
pages = {75–90},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Technological advances offer the promise of autonomous systems to form human-machine teams that are more capable than their individual members. Understanding the inner workings of the autonomous systems, especially as machine-learning (ML) methods are being widely applied to the design of such systems, has become increasingly challenging for the humans working with them. The “black-box” nature of quantitative ML approaches poses an impediment to people’s situation awareness (SA) of these ML-based systems, often resulting in either disuse or over-reliance of autonomous systems employing such algorithms. Research in human-automation interaction has shown that transparency communication can improve teammates’ SA, foster the trust relationship, and boost the human-automation team’s performance. In this chapter, we will examine the implications of an agent transparency model for human interactions with ML-based agents using automated explanations. We will discuss the application of a particular ML method, reinforcement learning (RL), in Partially Observable Markov Decision Process (POMDP)-based agents, and the design of explanation algorithms for RL in POMDPs.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
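For readers unfamiliar with the POMDP machinery this chapter builds on, the sketch below shows the generic Bayesian belief update that underlies POMDP-based agents. The two-state numbers are invented; this is background, not the chapter's explanation algorithm.

# Generic POMDP belief update for one action-observation step (toy numbers).
import numpy as np

T = np.array([[0.9, 0.1],   # T[s, s'] = P(s' | s, a) for the chosen action a
              [0.2, 0.8]])
O = np.array([0.7, 0.3])    # O[s'] = P(o | s') for the received observation o

def update_belief(b, T, O):
    # b'(s') is proportional to O(o | s') * sum_s T(s' | s, a) * b(s).
    predicted = T.T @ b
    posterior = O * predicted
    return posterior / posterior.sum()

b = np.array([0.5, 0.5])    # uniform prior over the two hidden states
print(update_belief(b, T, O))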
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction Proceedings Article
In: Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR), Association for the Advancement of Artificial Intelligence, London, UK, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{pynadath_nearest-neighbor_2018,
title = {A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://aied2018.utscic.edu.au/proceedings/},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR)},
publisher = {Association for the Advancement of Artificial Intelligence},
address = {London, UK},
abstract = {Trust is critical to the success of human-robot interaction (HRI), and one of the critical antecedents to trust is transparency. To best interact with human teammates, a robot must be able to ensure that they understand its decision-making process. Recent work has developed automated explanation methods that can achieve this goal. However, individual differences among human teammates require that the robot dynamically adjust its explanation strategy based on their unobservable subjective beliefs. We therefore need methods by which a robot can recognize its teammates’ subjective beliefs relevant to trust-building (e.g., their understanding of the robot’s capabilities and process). We leverage a nonparametric method, common across many fields of artificial intelligence, to enable a robot to use its history of prior interactions as a means for recognizing and predicting a new teammate’s subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable subjective beliefs. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors to infer the likelihood of possible subjective beliefs, and the results provide insights into the types of subjective beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen
Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing Proceedings Article
In: Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018, pp. 17–22, ACM Press, Beijing, China, 2018, ISBN: 978-1-4503-6376-1.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, MxR, UARC
@inproceedings{kang_socio-cultural_2018,
title = {Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang},
url = {http://dl.acm.org/citation.cfm?doid=3205326.3205348},
doi = {10.1145/3205326.3205348},
isbn = {978-1-4503-6376-1},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018},
pages = {17–22},
publisher = {ACM Press},
address = {Beijing, China},
abstract = {We explored how users perceive virtual characters that performed the role of a counseling interviewer, while presenting different levels of social class, as well as single or multi-tasking behavior. To investigate this subject, we designed a 2x2 experiment (tasking type and social class of the virtual counseling interviewer). In the experiment, participants experienced the counseling interview interactions over video conferencing on a smartphone. We measured user responses to and perceptions of the virtual human interviewer. The results demonstrate that the tasking types and social class of the virtual counselor affected user responses to and perceptions of the virtual counselor. The results offer insight into the design and development of effective, realistic, and believable virtual human counselors. Furthermore, the results also address current social questions about how smartphones might mediate social interactions, including human-agent interactions.},
keywords = {ARL, DoD, MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Merchant, Chirag; Schwartz, David; Goldberg, Stephen L.
Learning by Explaining to a Digital Doppelganger Book Section
In: Intelligent Tutoring Systems, vol. 10858, pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, Social Simulation, UARC, Virtual Humans
@incollection{wang_learning_2018,
title = {Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and Chirag Merchant and David Schwartz and Stephen L. Goldberg},
url = {http://link.springer.com/10.1007/978-3-319-91464-0_25},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-05-01},
booktitle = {Intelligent Tutoring Systems},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. An emerging computer animation technology makes the creation of digital doppelgangers an accessible reality. This allows researchers in pedagogical agents to explore previously unexplorable research questions, such as how increasing the similarity in appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual listener in a learning-by-explaining paradigm. Results offer insight into the promise and limitation of this novel technology.},
keywords = {ARL, DoD, MedVR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Traum, David; Henry, Cassidy; Lukin, Stephanie; Artstein, Ron; Gervits, Felix; Pollard, Kim; Bonial, Claire; Lei, Su; Voss, Clare R.; Marge, Matthew; Hayes, Cory J.; Hill, Susan G.
Dialogue Structure Annotation for Multi-Floor Interaction Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 104–111, ELRA, Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{traum_dialogue_2018,
title = {Dialogue Structure Annotation for Multi-Floor Interaction},
author = {David Traum and Cassidy Henry and Stephanie Lukin and Ron Artstein and Felix Gervits and Kim Pollard and Claire Bonial and Su Lei and Clare R. Voss and Matthew Marge and Cory J. Hayes and Susan G. Hill},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/672.html},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {104–111},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {We present an annotation scheme for meso-level dialogue structure, specifically designed for multi-floor dialogue. The scheme includes a transaction unit that clusters utterances from multiple participants and floors into units according to realization of an initiator’s intent, and relations between individual utterances within the unit. We apply this scheme to annotate a corpus of multi-floor human-robot interaction dialogues. We examine the patterns of structure observed in these dialogues and present inter-annotator statistics and relative frequencies of types of relations and transaction units. Finally, some example applications of these annotations are introduced.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Lukin, Stephanie M.; Foots, Ashley; Henry, Cassidy; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human-Robot Dialogue and Collaboration in Search and Navigation Proceedings Article
In: Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions, AREA 2018, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, Virtual Humans
@inproceedings{bonial_human-robot_2018,
title = {Human-Robot Dialogue and Collaboration in Search and Navigation},
author = {Claire Bonial and Stephanie M. Lukin and Ashley Foots and Cassidy Henry and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {http://www.areaworkshop.org/wp-content/uploads/2018/05/4.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions},
publisher = {AREA 2018},
address = {Miyazaki, Japan},
abstract = {Collaboration with a remotely located robot in tasks such as disaster relief and search and rescue can be facilitated by grounding natural language task instructions into actions executable by the robot in its current physical context. The corpus we describe here provides insight into the translation and interpretation a natural language instruction undergoes starting from verbal human intent, to understanding and processing, and ultimately, to robot execution. We use a ‘Wizard-of-Oz’ methodology to elicit the corpus data in which a participant speaks freely to instruct a robot on what to do and where to move through a remote environment to accomplish collaborative search and navigation tasks. This data offers the potential for exploring and evaluating action models by connecting natural language instructions to execution by a physical robot (controlled by a human ‘wizard’). In this paper, a description of the corpus (soon to be openly available) and examples of actions in the dialogue are provided.},
keywords = {ARL, DoD, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Trout, Theron; Russell, Stephen M.; Harrison, Andre V.; Spicer, Ryan; Dennison, Mark S.; Thomas, Jerald; Rosenberg, Evan Suma
Collaborative mixed reality (MxR) and networked decision making Proceedings Article
In: Next-Generation Analyst VI, pp. 21, SPIE, Orlando, Florida, 2018, ISBN: 978-1-5106-1817-6 978-1-5106-1818-3.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC
@inproceedings{trout_collaborative_2018,
title = {Collaborative mixed reality (MxR) and networked decision making},
author = {Theron Trout and Stephen M. Russell and Andre V. Harrison and Ryan Spicer and Mark S. Dennison and Jerald Thomas and Evan Suma Rosenberg},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10653/2309959/Collaborative-mixed-reality-MxR-and-networked-decision-making/10.1117/12.2309959.full},
doi = {10.1117/12.2309959},
isbn = {978-1-5106-1817-6 978-1-5106-1818-3},
year = {2018},
date = {2018-04-01},
booktitle = {Next-Generation Analyst VI},
pages = {21},
publisher = {SPIE},
address = {Orlando, Florida},
abstract = {Collaborative decision-making remains a significant research challenge that is made even more complicated in real-time or tactical problem-contexts. Advances in technology have dramatically assisted the ability for computers and networks to improve the decision-making process (i.e. intelligence, design, and choice). In the intelligence phase of decision making, mixed reality (MxR) has shown a great deal of promise through implementations of simulation and training. However little research has focused on an implementation of MxR to support the entire scope of the decision cycle, let alone collaboratively and in a tactical context. This paper presents a description of the design and initial implementation for the Defense Integrated Collaborative Environment (DICE), an experimental framework for supporting theoretical and empirical research on MxR for tactical decision-making support.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Rovira, Ericka; Barnes, Michael J.; Hill, Susan G.
Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams Book Section
In: Persuasive Technology, vol. 10809, pp. 56–69, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-78977-4 978-3-319-78978-1.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@incollection{wang_is_2018,
title = {Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Ericka Rovira and Michael J. Barnes and Susan G. Hill},
url = {http://link.springer.com/10.1007/978-3-319-78978-1_5},
doi = {10.1007/978-3-319-78978-1_5},
isbn = {978-3-319-78977-4 978-3-319-78978-1},
year = {2018},
date = {2018-04-01},
booktitle = {Persuasive Technology},
volume = {10809},
pages = {56–69},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Trust is critical to the success of human-robot interaction. Research has shown that people will more accurately trust a robot if they have an accurate understanding of its decision-making process. The Partially Observable Markov Decision Process (POMDP) is one such decision-making process, but its quantitative reasoning is typically opaque to people. This lack of transparency is exacerbated when a robot can learn, making its decision making better, but also less predictable. Recent research has shown promise in calibrating human-robot trust by automatically generating explanations of POMDP-based decisions. In this work, we explore factors that can potentially interact with such explanations in influencing human decision-making in human-robot teams. We focus on explanations with quantitative expressions of uncertainty and experiment with common design factors of a robot: its embodiment and its communication strategy in case of an error. Results help us identify valuable properties and dynamics of the human-robot trust relationship.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Weber, René; Mangus, J. Michael; Huskey, Richard; Hopp, Frederic R.; Amir, Ori; Swanson, Reid; Gordon, Andrew; Khooshabeh, Peter; Hahn, Lindsay; Tamborini, Ron
Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions Journal Article
In: Communication Methods and Measures, vol. 12, no. 2-3, pp. 119–139, 2018, ISSN: 1931-2458, 1931-2466.
Abstract | Links | BibTeX | Tags: ARL, DoD, Narrative
@article{weber_extracting_2018,
title = {Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions},
author = {René Weber and J. Michael Mangus and Richard Huskey and Frederic R. Hopp and Ori Amir and Reid Swanson and Andrew Gordon and Peter Khooshabeh and Lindsay Hahn and Ron Tamborini},
url = {https://www.tandfonline.com/doi/full/10.1080/19312458.2018.1447656},
doi = {10.1080/19312458.2018.1447656},
issn = {1931-2458, 1931-2466},
year = {2018},
date = {2018-03-01},
journal = {Communication Methods and Measures},
volume = {12},
number = {2-3},
pages = {119–139},
abstract = {Moral Foundations Theory (MFT) and the Model of Intuitive Morality and Exemplars (MIME) contend that moral judgments are built on a universal set of basic moral intuitions. A large body of research has supported many of MFT’s and the MIME’s central hypotheses. Yet, an important prerequisite of this research—the ability to extract latent moral content represented in media stimuli with a reliable procedure—has not been systematically studied. In this article, we subject different extraction procedures to rigorous tests, underscore challenges by identifying a range of reliabilities, develop new reliability test and coding procedures employing computational methods, and provide solutions that maximize the reliability and validity of moral intuition extraction. In six content analytical studies, including a large crowd-based study, we demonstrate that: (1) traditional content analytical approaches lead to rather low reliabilities; (2) variation in coding reliabilities can be predicted by both text features and characteristics of the human coders; and (3) reliability is largely unaffected by the detail of coder training. We show that a coding task with simplified training and a coding technique that treats moral foundations as fast, spontaneous intuitions leads to acceptable inter-rater agreement, and potentially to more valid moral intuition extractions. While this study was motivated by issues related to MFT and MIME research, the methods and findings in this study have implications for extracting latent content from text narratives that go beyond moral information. Accordingly, we provide a tool for researchers interested in applying this new approach in their own work.},
keywords = {ARL, DoD, Narrative},
pubstate = {published},
tppubtype = {article}
}
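Since coder reliability is the crux of this article, a standard chance-corrected agreement statistic makes the setting concrete. The sketch below scores two hypothetical coders' moral-foundation labels with Cohen's kappa; the labels are invented, and the paper's own reliability tests and computational coding procedures go well beyond this.

# Chance-corrected agreement between two hypothetical coders (illustrative).
from sklearn.metrics import cohen_kappa_score

coder_a = ["care", "fairness", "loyalty", "care", "authority", "care"]
coder_b = ["care", "fairness", "care", "care", "authority", "purity"]

print(cohen_kappa_score(coder_a, coder_b))  # 1.0 = perfect, 0.0 = chance-level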
Khooshabeh, Peter; Lucas, Gale
Virtual Human Role Players for Studying Social Factors in Organizational Decision Making Journal Article
In: Frontiers in Psychology, vol. 9, 2018, ISSN: 1664-1078.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{khooshabeh_virtual_2018,
title = {Virtual Human Role Players for Studying Social Factors in Organizational Decision Making},
author = {Peter Khooshabeh and Gale Lucas},
url = {http://journal.frontiersin.org/article/10.3389/fpsyg.2018.00194/full},
doi = {10.3389/fpsyg.2018.00194},
issn = {1664-1078},
year = {2018},
date = {2018-03-01},
journal = {Frontiers in Psychology},
volume = {9},
abstract = {The cyber domain of military operations presents many challenges. A unique element is the social dynamic between cyber operators and their leadership because of the novel subject matter expertise involved in conducting technical cyber tasks, so there will be situations where senior leaders might have much less domain knowledge or no experience at all relative to the warfighters who report to them. Nonetheless, it will be important for junior cyber operators to convey convincing information relevant to a mission in order to persuade or influence a leader to make informed decisions. The power dynamic will make it difficult for the junior cyber operator to successfully influence a higher ranking leader. Here we present a perspective with a sketch for research paradigm(s) to study how different factors (normative vs. informational social influence, degree of transparency, and perceived appropriateness of making suggestions) might interact with differential social power dynamics of individuals in cyber decision-making contexts. Finally, we contextualize this theoretical perspective for the research paradigms in viable training technologies.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Pynadath, David V.; Barnes, Michael J.; Hill, Susan G.
Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, ACM, Chicago, IL, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{wang_comparing_2018,
title = {Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate},
author = {Ning Wang and David V. Pynadath and Michael J. Barnes and Susan G. Hill},
url = {http://people.ict.usc.edu/~nwang/PDF/HRI-ERS-2018-Wang.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
publisher = {ACM},
address = {Chicago, IL},
abstract = {Trust is critical to the success of human-robot interaction (HRI). Research has shown that people will more accurately trust a robot if they have a more accurate understanding of its decision-making process. Recent research has shown promise in calibrating human-agent trust by automatically generating explanations of the decision-making process, such as POMDP-based ones. In this paper, we compare two automatically generated explanations, one with quantitative information on uncertainty and one based on sensor observations, and study the impact of such explanations on perception of a robot in a human-robot team.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Neubauer, Catherine; Mozgai, Sharon; Scherer, Stefan; Woolley, Joshua; Chuang, Brandon
Manual and Automatic Measures Confirm: Intranasal Oxytocin Increases Facial Expressivity Journal Article
In: Affective Computing and Intelligent Interaction, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, VHTL, Virtual Humans
@article{neubauer_manual_2017,
title = {Manual and Automatic Measures Confirm: Intranasal Oxytocin Increases Facial Expressivity},
author = {Catherine Neubauer and Sharon Mozgai and Stefan Scherer and Joshua Woolley and Brandon Chuang},
url = {https://www.researchgate.net/publication/321644417_Manual_and_Automatic_Measures_Confirm-Intranasal_Oxytocin_Increases_Facial_Expressivity?enrichId=rgreq-22efb1e32ef30cdd22e6bee2b3b63d56-XXX&enrichSource=Y292ZXJQYWdlOzMyMTY0NDQxNztBUzo1NjkwNTI4NzM4NTQ5NzZAMTUxMjY4NDE4NTcyOQ%3D%3D&el=1_x_2&_esc=publicationCoverPdf},
year = {2017},
date = {2017-12-01},
journal = {Affective Computing and Intelligent Interaction},
abstract = {The effects of oxytocin on facial emotional expressivity were investigated in individuals with schizophrenia and age-matched healthy controls during the completion of a Social Judgment Task (SJT) with a double-blind, placebo-controlled, cross-over design. Although pharmacological interventions exist to help alleviate some symptoms of schizophrenia, currently available agents are not effective at improving the severity of blunted facial affect. Participant facial expressivity was previously quantified from video recordings of the SJT using a well-validated manual approach (Facial Expression Coding System; FACES). We confirm these findings using an automated computer-based approach. Using both methods we found that the administration of oxytocin significantly increased total facial expressivity in individuals with schizophrenia and increased facial expressivity at trend level in healthy controls. Secondary analysis showed that oxytocin also significantly increased the frequency of negative valence facial expressions in individuals with schizophrenia but not in healthy controls and that oxytocin did not significantly increase positive valence facial expressions in either group. Both manual coding and automatic facial analysis revealed the same pattern of findings. Considering manual annotation can be expensive and time-consuming, these results suggest that automatic facial analysis may be an efficient and cost-effective alternative to currently utilized manual approaches and may be ready for use in clinical settings.},
keywords = {ARL, DoD, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Swanson, Reid William; Gordon, Andrew S.; Khooshabeh, Peter; Sagae, Kenji; Huskey, Richard; Mangus, Michael; Amir, Ori; Weber, Rene
An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures Journal Article
In: Dialogue & Discourse, vol. 8, no. 2, pp. 105–128, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, ICB, Narrative, UARC
@article{swanson_empirical_2017,
title = {An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures},
author = {Reid William Swanson and Andrew S. Gordon and Peter Khooshabeh and Kenji Sagae and Richard Huskey and Michael Mangus and Ori Amir and Rene Weber},
url = {https://www.researchgate.net/publication/321170929_An_Empirical_Analysis_of_Subjectivity_and_Narrative_Levels_in_Personal_Weblog_Storytelling_Across_Cultures?_sg=Ck1pqxhW1uuTUe54DX5BLVYey6L6DkwTpjnes1ctAEuGQDHxoEOr887eKWjHIA0_-kk4ya9dXwEZ4OM},
doi = {10.5087/dad.2017.205},
year = {2017},
date = {2017-11-01},
journal = {Dialogue & Discourse},
volume = {8},
number = {2},
pages = {105–128},
abstract = {Storytelling is a universal activity, but the way in which discourse structure is used to persuasively convey ideas and emotions may depend on cultural factors. Because first-person accounts of life experiences can have a powerful impact in how a person is perceived, the storyteller may instinctively employ specific strategies to shape the audience’s perception. Hypothesizing that some of the differences in storytelling can be captured by the use of narrative levels and subjectivity, we analyzed over one thousand narratives taken from personal weblogs. First, we compared stories from three different cultures written in their native languages: English, Chinese and Farsi. Second, we examined the impact of these two discourse properties on a reader’s attitude and behavior toward the narrator. We found surprising similarities and differences in how stories are structured along these two dimensions across cultures. These discourse properties have a small but significant impact on a reader’s behavioral response toward the narrator.},
keywords = {ARL, DoD, ICB, Narrative, UARC},
pubstate = {published},
tppubtype = {article}
}
Rosario, Dalton; Borel, Christoph; Conover, Damon; McAlinden, Ryan; Ortiz, Anthony; Shiver, Sarah; Simon, Blair
Small Drone Field Experiment: Data Collection & Processing Journal Article
In: NATO SET-241 Symposium, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, STG, UARC
@article{rosario_small_2017,
title = {Small Drone Field Experiment: Data Collection & Processing},
author = {Dalton Rosario and Christoph Borel and Damon Conover and Ryan McAlinden and Anthony Ortiz and Sarah Shiver and Blair Simon},
url = {https://arxiv.org/abs/1711.10693},
year = {2017},
date = {2017-11-01},
journal = {NATO SET-241 Symposium},
abstract = {Following an initiative formalized in April 2016—formally known as ARL West—between the U.S. Army Research Laboratory (ARL) and University of Southern California’s Institute for Creative Technologies (USC ICT), a field experiment was coordinated and executed in the summer of 2016 by ARL, USC ICT, and Headwall Photonics. The purpose was to image part of the USC main campus in Los Angeles, USA, using two portable COTS (commercial off the shelf) aerial drone solutions for data acquisition, for photogrammetry (3D reconstruction from images), and fusion of hyperspectral data with the recovered set of 3D point clouds representing the target area. The research aims for determining the viability of having a machine capable of segmenting the target area into key material classes (e.g., manmade structures, live vegetation, water) for use in multiple purposes, to include providing the user with a more accurate scene understanding and enabling the unsupervised automatic sampling of meaningful material classes from the target area for adaptive semi-supervised machine learning. In the latter, a target-set library may be used for automatic machine training with data of local material classes, as an example, to increase the prediction chances of machines recognizing targets. The field experiment and associated data post processing approach to correct for reflectance, geo-rectify, recover the area’s dense point clouds from images, register spectral with elevation properties of scene surfaces from the independently collected datasets, and generate the desired scene segmented maps are discussed. Lessons learned from the experience are also highlighted throughout the paper.},
keywords = {ARL, DoD, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Chollet, Mathieu; Mozgai, Sharon; Dennison, Mark; Khooshabeh, Peter; Scherer, Stefan
The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task Proceedings Article
In: Proceedings of the 19th ACM International Conference on Multimodal Interaction, pp. 426–432, ACM Press, Glasgow, UK, 2017, ISBN: 978-1-4503-5543-8.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, VHTL, Virtual Humans
@inproceedings{neubauer_relationship_2017,
title = {The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task},
author = {Catherine Neubauer and Mathieu Chollet and Sharon Mozgai and Mark Dennison and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=3136755.3136804},
doi = {10.1145/3136755.3136804},
isbn = {978-1-4503-5543-8},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the 19th ACM International Conference on Multimodal Interaction},
pages = {426–432},
publisher = {ACM Press},
address = {Glasgow, UK},
abstract = {It is commonly known that a relationship exists between the human voice and various emotional states. Past studies have demonstrated changes in a number of vocal features, such as fundamental frequency f0 and peakSlope, as a result of varying emotional state. These voice characteristics have been shown to relate to emotional load, vocal tension, and, in particular, stress. Although much research exists in the domain of voice analysis, few studies have assessed the relationship between stress and changes in the voice during a dyadic team interaction. The aim of the present study was to investigate the multimodal interplay between speech and physiology during a high-workload, high-stress team task. Specifically, we studied task-induced effects on participants' vocal signals, namely, the f0 and peakSlope features, as well as participants' physiology, through cardiovascular measures. Further, we assessed the relationship between physiological states related to stress and changes in the speaker's voice. We recruited participants with the specific goal of working together to defuse a simulated bomb. Half of our sample participated in an "Ice Breaker" scenario, during which they were allowed to converse and familiarize themselves with their teammate prior to the task, while the other half of the sample served as our "Control". Fundamental frequency (f0), peakSlope, physiological state, and subjective stress were measured during the task. Results indicated that f0 and peakSlope significantly increased from the beginning to the end of each task trial, and were highest in the last trial, which indicates an increase in emotional load and vocal tension. Finally, cardiovascular measures of stress indicated that the vocal and emotional load of speakers towards the end of the task mirrored a physiological state of psychological "threat".},
keywords = {ARL, DoD, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
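As a rough illustration of the f0 feature this study tracks across trials, the sketch below extracts a per-trial mean fundamental frequency with librosa's pYIN tracker. The file name is a placeholder, and the peakSlope feature is not computed here.

# Sketch: mean f0 for one trial recording via pYIN (placeholder file name).
import numpy as np
import librosa

y, sr = librosa.load("trial_01.wav", sr=None)   # hypothetical trial audio
f0, voiced_flag, voiced_prob = librosa.pyin(
    y, fmin=librosa.note_to_hz("C2"), fmax=librosa.note_to_hz("C7"), sr=sr)

# pYIN returns NaN for unvoiced frames, so average only the voiced frames.
print("mean f0 (Hz):", np.nanmean(f0))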
Wang, Ning; Pynadath, David V.; Hill, Susan G.; Merchant, Chirag
The Dynamics of Human-Agent Trust with POMDP-Generated Explanations Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents (IVA 2017), Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, Social Simulation, UARC
@inproceedings{wang_dynamics_2017,
title = {The Dynamics of Human-Agent Trust with POMDP-Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill and Chirag Merchant},
url = {https://link.springer.com/chapter/10.1007/978-3-319-67401-8_58},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents (IVA 2017)},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {Partially Observable Markov Decision Processes (POMDPs) enable optimized decision making by robots, agents, and other autonomous systems. This quantitative optimization can also be a limitation in human-agent interaction, as the resulting autonomous behavior, while possibly optimal, is often impenetrable to human teammates, leading to improper trust and, subsequently, disuse or misuse of such systems [1]. Automatically generated explanations of POMDP-based decisions have shown promise in calibrating human-agent trust [3]. However, these “one-size-fits-all” static explanation policies are insufficient to accommodate different communication preferences across people. In this work, we analyze human behavior in a human-robot interaction (HRI) scenario, to find behavioral indicators of trust in the agent’s ability. We evaluate four hypothesized behavioral measures that an agent could potentially use to dynamically infer its teammate’s current trust level. The conclusions drawn can potentially inform the design of intelligent agents that can automatically adapt their explanation policies as they observe the behavioral responses of their human teammates.},
keywords = {ARL, DoD, MedVR, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Foots, Ashley; Hayes, Cory; Henry, Cassidy; Pollard, Kimberly; Artstein, Ron; Voss, Clare; Traum, David
Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task Proceedings Article
In: Proceedings of the First Workshop on Language Grounding for Robotics, pp. 58–66, Association for Computational Linguistics, Vancouver, Canada, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{marge_exploring_2017,
title = {Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task},
author = {Matthew Marge and Claire Bonial and Ashley Foots and Cory Hayes and Cassidy Henry and Kimberly Pollard and Ron Artstein and Clare Voss and David Traum},
url = {http://www.aclweb.org/anthology/W17-2808},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the First Workshop on Language Grounding for Robotics},
pages = {58–66},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Robot-directed communication is variable, and may change based on human perception of robot capabilities. To collect training data for a dialogue system and to investigate possible communication changes over time, we developed a Wizard-of-Oz study that (a) simulates a robot’s limited understanding, and (b) collects dialogues where human participants build a progressively better mental model of the robot’s understanding. With ten participants, we collected ten hours of human-robot dialogue. We analyzed the structure of instructions that participants gave to a remote robot before it responded. Our findings show a general initial preference for including metric information (e.g., move forward 3 feet) over landmarks (e.g., move to the desk) in motion commands, but this decreased over time, suggesting changes in perception.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Henry, Cassidy; Moolchandani, Pooja; Pollard, Kimberly A.; Bonial, Claire; Foots, Ashley; Artstein, Ron; Hayes, Cory; Voss, Clare R.; Traum, David; Marge, Matthew
Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World Proceedings Article
In: Proceedings of the WiNLP workshop, Vancouver, Canada, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{cassidy_towards_2017,
title = {Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World},
author = {Cassidy Henry and Pooja Moolchandani and Kimberly A. Pollard and Claire Bonial and Ashley Foots and Ron Artstein and Cory Hayes and Clare R. Voss and David Traum and Matthew Marge},
url = {http://www.winlp.org/wp-content/uploads/2017/final_papers_2017/52_Paper.pdf},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the WiNLP workshop},
address = {Vancouver, Canada},
abstract = {Our research aims to develop a natural dialogue interface between robots and humans. We describe two focused efforts to increase data collection efficiency towards this end: creation of an annotated corpus of interaction data, and a robot simulation, allowing greater flexibility in when and where we can run experiments.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Social decisions and fairness change when people’s interests are represented by autonomous agents Journal Article
In: Autonomous Agents and Multi-Agent Systems, pp. 163–187, 2017, ISSN: 1387-2532, 1573-7454.
Abstract | Links | BibTeX | Tags: ARL, DoD, Virtual Humans
@article{de_melo_social_2017,
title = {Social decisions and fairness change when people’s interests are represented by autonomous agents},
author = {Celso M. Melo and Stacy Marsella and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s10458-017-9376-6},
doi = {10.1007/s10458-017-9376-6},
issn = {1387-2532, 1573-7454},
year = {2017},
date = {2017-07-01},
journal = {Autonomous Agents and Multi-Agent Systems},
pages = {163–187},
abstract = {There has been growing interest in agents that represent people’s interests or act on their behalf such as automated negotiators, self-driving cars, or drones. Even though people will interact often with others via these agent representatives, little is known about whether people’s behavior changes when acting through these agents, when compared to direct interaction with others. Here we show that people’s decisions will change in important ways because of these agents; specifically, we showed that interacting via agents is likely to lead people to behave more fairly, when compared to direct interaction with others. We argue this occurs because programming an agent leads people to adopt a broader perspective, consider the other side’s position, and rely on social norms—such as fairness—to guide their decision making. To support this argument, we present four experiments: in Experiment 1 we show that people made fairer offers in the ultimatum and impunity games when interacting via agent representatives, when compared to direct interaction; in Experiment 2, participants were less likely to accept unfair offers in these games when agent representatives were involved; in Experiment 3, we show that the act of thinking about the decisions ahead of time—i.e., under the so-called “strategy method”—can also lead to increased fairness, even when no agents are involved; and, finally, in Experiment 4 we show that participants were less likely to reach an agreement with unfair counterparts in a negotiation setting. We discuss theoretical implications for our understanding of the nature of people’s social behavior with agent representatives, as well as practical implications for the design of agents that have the potential to increase fairness in society.},
keywords = {ARL, DoD, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Spicer, Ryan P.; Russell, Stephen M.; Rosenberg, Evan Suma
The mixed reality of things: emerging challenges for human-information interaction Proceedings Article
In: Proceedings Volume 10207, Next-Generation Analyst V, SPIE, Anaheim, CA, 2017.
@inproceedings{spicer_mixed_2017,
title = {The mixed reality of things: emerging challenges for human-information interaction},
author = {Ryan P. Spicer and Stephen M. Russell and Evan Suma Rosenberg},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2268004},
doi = {10.1117/12.2268004},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings Volume 10207, Next-Generation Analyst V},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {Virtual and mixed reality technology has advanced tremendously over the past several years. This nascent medium has the potential to transform how people communicate over distance, train for unfamiliar tasks, operate in challenging environments, and how they visualize, interact, and make decisions based on complex data. At the same time, the marketplace has experienced a proliferation of network-connected devices and generalized sensors that are becoming increasingly accessible and ubiquitous. As the "Internet of Things" expands to encompass a predicted 50 billion connected devices by 2020, the volume and complexity of information generated in pervasive and virtualized environments will continue to grow exponentially. The convergence of these trends demands a theoretically grounded research agenda that can address emerging challenges for human-information interaction (HII). Virtual and mixed reality environments can provide controlled settings where HII phenomena can be observed and measured, new theories developed, and novel algorithms and interaction techniques evaluated. In this paper, we describe the intersection of pervasive computing with virtual and mixed reality, identify current research gaps and opportunities to advance the fundamental understanding of HII, and discuss implications for the design and development of cyber-human systems for both military and civilian use.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Berkiten, Sema; Halber, Maciej; Solomon, Justin; Ma, Chongyang; Li, Hao; Rusinkiewicz, Szymon
Learning Detail Transfer based on Geometric Features Journal Article
In: Computer Graphics Forum, vol. 36, no. 2, pp. 361–373, 2017, ISSN: 0167-7055.
@article{berkiten_learning_2017,
title = {Learning Detail Transfer based on Geometric Features},
author = {Sema Berkiten and Maciej Halber and Justin Solomon and Chongyang Ma and Hao Li and Szymon Rusinkiewicz},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.13132/full},
doi = {10.1111/cgf.13132},
issn = {0167-7055},
year = {2017},
date = {2017-05-01},
journal = {Computer Graphics Forum},
volume = {36},
number = {2},
pages = {361–373},
abstract = {The visual richness of computer graphics applications is frequently limited by the difficulty of obtaining high-quality, detailed 3D models. This paper proposes a method for realistically transferring details (specifically, displacement maps) from existing high-quality 3D models to simple shapes that may be created with easy-to-learn modeling tools. Our key insight is to use metric learning to find a combination of geometric features that successfully predicts detail-map similarities on the source mesh; we use the learned feature combination to drive the detail transfer. The latter uses a variant of multi-resolution non-parametric texture synthesis, augmented by a high-frequency detail transfer step in texture space. We demonstrate that our technique can successfully transfer details among a variety of shapes including furniture and clothing.},
keywords = {ARL, DoD, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Conover, Damon M.; Beidleman, Brittany; McAlinden, Ryan; Borel-Donohue, Christoph C.
Visualizing UAS-Collected Imagery Using Augmented Reality Proceedings Article
In: Proceedings of the Next-Generation Analyst V conference, pp. 102070C, SPIE, Anaheim, CA, 2017.
@inproceedings{conover_visualizing_2017,
title = {Visualizing UAS-Collected Imagery Using Augmented Reality},
author = {Damon M. Conover and Brittany Beidleman and Ryan McAlinden and Christoph C. Borel-Donohue},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2262864},
doi = {10.1117/12.2262864},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Next-Generation Analyst V conference},
pages = {102070C},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {One of the areas where augmented reality will have an impact is in the visualization of 3-D data. 3-D data has traditionally been viewed on a 2-D screen, which has limited its utility. Augmented reality head-mounted displays, such as the Microsoft HoloLens, make it possible to view 3-D data overlaid on the real world. This allows a user to view and interact with the data in ways similar to how they would interact with a physical 3-D object, such as moving, rotating, or walking around it. A type of 3-D data that is particularly useful for military applications is geo-specific 3-D terrain data, and the visualization of this data is critical for training, mission planning, intelligence, and improved situational awareness. Advances in Unmanned Aerial Systems (UAS), photogrammetry software, and rendering hardware have drastically reduced the technological and financial obstacles in collecting aerial imagery and in generating 3-D terrain maps from that imagery. Because of this, there is an increased need to develop new tools for the exploitation of 3-D data. We will demonstrate how the HoloLens can be used as a tool for visualizing 3-D terrain data. We will describe: 1) how UAS-collected imagery is used to create 3-D terrain maps, 2) how those maps are deployed to the HoloLens, 3) how a user can view and manipulate the maps, and 4) how multiple users can view the same virtual 3-D object at the same time.},
keywords = {ARL, DoD, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen; Amir, Ori; Lin, Rebecca
Social influence of humor in virtual human counselor's self-disclosure Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 1546-4261.
@article{kang_social_2017,
title = {Social influence of humor in virtual human counselor's self-disclosure},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang and Ori Amir and Rebecca Lin},
url = {http://doi.wiley.com/10.1002/cav.1763},
doi = {10.1002/cav.1763},
issn = {1546-4261},
year = {2017},
date = {2017-04-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {We explored the social influence of humor in a virtual human counselor's self-disclosure while also varying the ethnicity of the virtual counselor. In a 2 × 3 experiment (humor and ethnicity of the virtual human counselor), participants experienced counseling interview interactions via Skype on a smartphone. We measured user responses to and perceptions of the virtual human counselor. The results demonstrate that humor positively affects user responses to and perceptions of a virtual counselor. The results further suggest that matching styles of humor with a virtual counselor's ethnicity influences user responses and perceptions. The results offer insight into the effective design and development of realistic and believable virtual human counselors. Furthermore, they illuminate the potential use of humor to enhance self-disclosure in human–agent interactions.},
keywords = {ARL, DoD, MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Scherer, Stefan
The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment Proceedings Article
In: Proceedings of the Preconference on Affective Computing at the Society for Affective Science, Boston, MA, 2017.
@inproceedings{neubauer_effects_2017,
title = {The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment},
author = {Catherine Neubauer and Stefan Scherer},
url = {http://ict.usc.edu/pubs/The%20Effects%20of%20Pre-task%20Team%20Collaboration%20on%20Facial%20Expression%20and%20Speech%20Entrainment.pdf},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the Preconference on Affective Computing at the Society for Affective Science},
address = {Boston, MA},
abstract = {Many everyday tasks are complex and require the coordination of one or more individuals. Such tasks can be relatively simple like passing a ball to a friend during a game of catch, while others are more complex such as performing a life-saving surgery where surgeons, anesthesiologists and nurses all work together in a multi-person team [1]. Such coordination requires the appropriate allocation of cognitive and behavioral effort to meet the changing demands of their environment and cannot be completed alone [1]. These mutually cooperative behaviors can include team communication, body position and even affective cues [2]. Some behaviors are explicitly controlled to be coordinated [3] (e.g., when an individual purposely attempts to follow the behaviors of their teammate or team leader), while others are implicit or unconscious. Presently, these shared behaviors have been referred to as entrainment [4] [5], mimicry [6] [7] and even action matching [8] [9]; however, the specific term used typically refers to the underlying theoretical cause for the phenomenon. Theoretically, entrainment can be explained as the spontaneous interpersonal coupling that occurs because the behavior of one or more individuals is affected by another’s behavior in a closed loop system. Additionally, such behavior is typically evident when working on a mutual, goal-directed task [10]. Therefore, for the purposes of this paper we will refer to the cooperative behaviors between teammates that support problem solving as entrainment.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Choromanski, Igor; Neubauer, Catherine; Krum, David M.; Spicer, Ryan; Campbell, Julia
Mixed Reality Training for Tank Platoon Leader Communication Skills Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 333–334, IEEE, Los Angeles, CA, 2017.
@inproceedings{khooshabeh_mixed_2017,
title = {Mixed Reality Training for Tank Platoon Leader Communication Skills},
author = {Peter Khooshabeh and Igor Choromanski and Catherine Neubauer and David M. Krum and Ryan Spicer and Julia Campbell},
url = {http://ieeexplore.ieee.org/document/7892312/#full-text-section},
doi = {10.1109/VR.2017.7892312},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {333–334},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Here we describe the design and usability evaluation of a mixed reality prototype to simulate the role of a tank platoon leader: an individual who not only commands a tank, but also directs a platoon of three other tanks, each with its own tank commander. The domain of tank commander training has relied on physical simulators of the actual Abrams tank that encapsulate the whole crew. The TALK-ON system we describe here focuses on training the communication skills of the leader in a simulated tank crew. We report results from a usability evaluation and discuss how they will inform our future work on collective tank training.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from UAS-Collected Imagery},
author = {Ryan Spicer and Ryan McAlinden and Damon Conover},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created from the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, roads and other surface features. Attributes such as slope, edge detection, and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (a georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serve as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning – collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {ARL, DoD, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Neubauer, Catherine; Woolley, Joshua; Khooshabeh, Peter; Scherer, Stefan
Getting to know you: a multimodal investigation of team behavior and resilience to stress Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 193–200, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{neubauer_getting_2016,
title = {Getting to know you: a multimodal investigation of team behavior and resilience to stress},
author = {Catherine Neubauer and Joshua Woolley and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993195},
doi = {10.1145/2993148.2993195},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {193–200},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Team cohesion has been suggested to be a critical factor in emotional resilience following periods of stress. Team cohesion may depend on several factors including emotional state, communication among team members and even psychophysiological response. The present study sought to employ several multimodal techniques designed to investigate team behavior as a means of understanding resilience to stress. We recruited 40 subjects to perform a cooperative task in gender-matched, two-person teams. They were responsible for working together to meet a common goal, which was to successfully disarm a simulated bomb. This high-workload task requires successful cooperation and communication among members. We assessed several behaviors that relate to facial expression, word choice and physiological responses (i.e., heart rate variability) within this scenario. A manipulation of an “ice breaker” condition was used to induce a level of comfort or familiarity within the team prior to the task. We found that individuals in the “ice breaker” condition exhibited better resilience to subjective stress following the task. These individuals also exhibited more insight and cognitive speech, more positive facial expressions and were also able to better regulate their emotional expression during the task, compared to the control.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {Ryan McAlinden and Sin-Hwa Kang and Benjamin Nye and Artemisa Phillips and Julia Campbell and Stephan L. Goldberg},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts of making content more interesting (i.e., the return on investment, or ROI). The paper presents findings from a 3-year-long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)) – the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions – engagement, learning and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others cannot be underestimated, and that the familiar phrase – ‘you get what you pay for’ – is as true with online learning as it is with other areas of content design and software development.},
keywords = {ARL, DoD, Learning Sciences, MedVR, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Pollard, Kimberly A.; Artstein, Ron; Byrne, Brendan; Hill, Susan G.; Voss, Clare; Traum, David
Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards Proceedings Article
In: Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016), Springer, Los Angeles, CA, 2016.
@inproceedings{marge_assessing_2016,
title = {Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards},
author = {Matthew Marge and Claire Bonial and Kimberly A. Pollard and Ron Artstein and Brendan Byrne and Susan G. Hill and Clare Voss and David Traum},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110460.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016)},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {The Wizard-of-Oz (WOz) method is a common experimental technique in virtual agent and human-robot dialogue research for eliciting natural communicative behavior from human partners when full autonomy is not yet possible. For the first phase of our research reported here, wizards play the role of dialogue manager, acting as a robot’s dialogue processing. We describe a novel step within WOz methodology that incorporates two wizards and control sessions: the wizards function much like corpus annotators, being asked to make independent judgments on how the robot should respond when receiving the same verbal commands in separate trials. We show that inter-wizard discussion after the control sessions and the resolution with a reconciled protocol for the follow-on pilot sessions successfully impacts wizard behaviors and significantly aligns their strategies. We conclude that, without control sessions, we would have been unlikely to achieve both the natural diversity of expression that comes with multiple wizards and a better protocol for modeling an automated system.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Boyce, Michael W.; Sottilare, Robert
Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 19–37, US Army Research Laboratory, Orlando, FL, 2016, ISBN: 978-0-9893923-9-6.
@incollection{nye_defining_2016,
title = {Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy},
author = {Benjamin D. Nye and Michael W. Boyce and Robert Sottilare},
url = {https://gifttutoring.org/attachments/download/1736/Design%20Recommendations%20for%20ITS_Volume%204%20-%20Domain%20Modeling%20Book_web%20version_final.pdf},
isbn = {978-0-9893923-9-6},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {19–37},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Attempts to define ill-defined domains in intelligent tutoring system (ITS) research has been approached a number of times (Fournier-Viger, Nkambou, & Nguifo, 2010; Lynch, Ashley, Pinkwart, & Aleven, 2009; Mitrovic & Weerasinghe, 2009; Jacovina, Snow, Dai, & McNamara, 2015; Woods, Stensrud, Wray, Haley, & Jones, 2015). Related research has tried to determine levels of ill-definedness for a domain (Le, Loll, & Pinkwart, 2013). Despite such attempts, the field has not yet converged on common guidelines to distinguish between well-defined versus ill-defined domains. We argue that such guidelines struggle to converge because a domain is too large to meaningfully categorize: every domain contains a mixture of well-defined and ill-defined tasks. While the co-existence of well-defined and ill-defined tasks in a single domain is nearly universally-agreed upon by researchers; this key point is often quickly buried by an extensive discussion about what makes certain domain tasks ill-defined (e.g., disagreement about ideal solutions, multiple solution paths). In this chapter, we first take a step back to consider what is meant by a domain in the context of learning. Next, based on this definition for a domain, we map out the components that are in a learning domain, since each component may have ill-defined parts. This leads into a discussion about the strategies that have been used to make ill-defined domains tractable for certain types of pedagogy. Examples of ITS research that applies these strategies are noted. Finally, we conclude with practical how-to considerations and open research questions for approaching ill-defined domains. This chapter should be considered a companion piece to our chapter in the prior volume of this series (Nye, Goldberg, & Hu, 2015). This chapter focuses on how to understand and transform ill-defined parts of domains, while the prior chapter discusses commonly-used learning tasks and authoring approaches for both well-defined and ill-defined tasks. As such, this chapter is intended to help the learner understand if and how different parts of the domain are ill-defined (and what to do about them). The companion piece in the authoring tools volume discusses different categories of well and ill-defined tasks, from the standpoint of attempting to author and maintain an ITS.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Olney, Andrew; Nye, Benjamin; Sinatra, Anna M.
Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling Book
US Army Research Laboratory, Orlando, FL, 2016.
@book{sottilare_design_2016,
title = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
author = {Robert A. Sottilare and Arthur C. Graesser and Xiangen Hu and Andrew Olney and Benjamin Nye and Anna M. Sinatra},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA1&dq=%22Barnes,+Behrooz+Mostafavi,+and+Michael+J.%22+%22A.+Sottilare+and+Joseph%22+%2214+%E2%80%93+Exploring+the+Diversity+of+Domain+Modeling+for+Training%22+%2213+%E2%80%92+Mining+Expertise:+Learning+New+Tricks+from+an+Old%22+&ots=6MJgp2XEWV&sig=7CHZvZIllN3Xk8uFbMHmxN7gfLw},
year = {2016},
date = {2016-07-01},
volume = {4},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Design Recommendations for Intelligent Tutoring Systems (ITSs) explores the impact of intelligent tutoring system design on education and training. Specifically, this volume examines “Authoring Tools and Expert Modeling Techniques”. The “Design Recommendations” book series examines tools and methods to reduce the time and skill required to develop Intelligent Tutoring Systems, with the goal of improving the Generalized Intelligent Framework for Tutoring (GIFT). GIFT is a modular, service-oriented architecture developed to capture simplified authoring techniques, promote reuse and standardization of ITSs along with automated instructional techniques, and provide effectiveness evaluation capabilities for adaptive tutoring tools and methods.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {book}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{wang_impact_2016,
title = {The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {997–1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations Proceedings Article
In: 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 109–116, IEEE, Christchurch, New Zealand, 2016.
@inproceedings{wang_trust_2016,
title = {Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7451741},
doi = {10.1109/HRI.2016.7451741},
year = {2016},
date = {2016-03-01},
booktitle = {2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
pages = {109–116},
publisher = {IEEE},
address = {Christchurch, New Zealand},
abstract = {Trust is a critical factor for achieving the full potential of human-robot teams. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain trust when the system is less than 100% reliable. In this work, we leverage existing agent algorithms to provide a domain-independent mechanism for robots to automatically generate such explanations. To measure the explanation mechanism's impact on trust, we collected self-reported survey data and behavioral data in an agent-based online testbed that simulates a human-robot team task. The results demonstrate that the added explanation capability led to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot trust calibration.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Graesser, Arthur C; Hu, Xiangen; Nye, Benjamin D.; Sottilare, Robert A.
Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT) Book Section
In: Using Games and Simulations for Teaching and Assessment, pp. 58–79, Routledge, New York, NY, 2016, ISBN: 978-0-415-73787-6.
@incollection{graesser_intelligent_2016,
title = {Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT)},
author = {Arthur C Graesser and Xiangen Hu and Benjamin D. Nye and Robert A. Sottilare},
url = {https://www.researchgate.net/publication/304013322_Intelligent_Tutoring_Systems_Serious_Games_and_the_Generalized_Intelligent_Framework_for_Tutoring_GIFT},
isbn = {978-0-415-73787-6},
year = {2016},
date = {2016-01-01},
booktitle = {Using Games and Simulations for Teaching and Assessment},
pages = {58–79},
publisher = {Routledge},
address = {New York, NY},
abstract = {This chapter explores the prospects of integrating games with intelligent tutoring systems (ITSs). The hope is that there can be learning environments that optimize both motivation through games and deep learning through ITS technologies. Deep learning refers to the acquisition of knowledge, skills, strategies, and reasoning processes at the higher levels of Bloom’s (1956) taxonomy or the Knowledge-Learning-Instruction (KLI) framework (Koedinger, Corbett, & Perfetti, 2012), such as the application of knowledge to new cases, knowledge analysis and synthesis, problem solving, critical thinking, and other difficult cognitive processes. In contrast, shallow learning involves perceptual learning, memorization of explicit material, and mastery of simple rigid procedures. Shallow knowledge may be adequate for near transfer tests of knowledge/skills but not far transfer tests to new situations that have some modicum of complexity.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}