Publications
Search
Hart, John; Gratch, Jonathan; Marsella, Stacy C.
How virtual reality training can win friends and influence people Book Section
In: Best, Christopher; Galanis, George; Kerry, James; Sottilare, Robert (Eds.): Fundamental Issues in Defense Training and Simulation, Ashgate, 2013, ISBN: 978-1-4094-4721-4.
@incollection{hart_how_2013,
  title     = {How virtual reality training can win friends and influence people},
  author    = {Hart, John and Gratch, Jonathan and Marsella, Stacy C.},
  editor    = {Best, Christopher and Galanis, George and Kerry, James and Sottilare, Robert},
  url       = {http://www.amazon.com/Fundamental-Defense-Training-Simulation-Factors-ebook/dp/B00EUE2F2I},
  isbn      = {978-1-4094-4721-4},
  year      = {2013},
  date      = {2013-08-01},
  booktitle = {Fundamental Issues in Defense Training and Simulation},
  publisher = {Ashgate},
  series    = {Human Factors in Defense},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Khooshabeh, Peter; Melo, Celso M.; Volkman, Brooks; Gratch, Jonathan; Blascovich, Jim; Carnevale, Peter
Negotiation Strategies with Incongruent Facial Expressions of Emotion Cause Cardiovascular Threat Proceedings Article
In: Cognitive Science, Berlin, Germany, 2013.
@inproceedings{khooshabeh_negotiation_2013,
  title     = {Negotiation Strategies with Incongruent Facial Expressions of Emotion Cause Cardiovascular Threat},
  author    = {Khooshabeh, Peter and Melo, Celso M. and Volkman, Brooks and Gratch, Jonathan and Blascovich, Jim and Carnevale, Peter},
  url       = {http://ict.usc.edu/pubs/Negotiation%20Strategies%20with%20Incongruent%20Facial%20Expressions%20of%20Emotion%20Cause%20Cardiovascular%20Threat.pdf},
  year      = {2013},
  date      = {2013-08-01},
  booktitle = {Cognitive Science},
  address   = {Berlin, Germany},
  abstract  = {Affect is important in motivated performance situations such as negotiation. Longstanding theories of emotion suggest that facial expressions provide enough information to perceive another person's internal affective state. Alternatively, the contextual emotion hypothesis posits that situational factors bias the perception of emotion in others' facial displays. This hypothesis predicts that individuals will have different perceptions of the same facial expression depending upon the context in which the expression is displayed. In this study, cardiovascular indexes of motivational states (i.e., challenge vs. threat) were recorded while players engaged in a multi-issue negotiation where the opposing negotiator (confederate) displayed emotional facial expressions (angry vs. happy); the confederate's negotiation strategy (cooperative vs. competitive) was factorially crossed with his facial expression. During the game, participants' eye fixations and cardiovascular responses, indexing task engagement and challenge/threat motivation, were recorded. Results indicated that participants playing confederates with incongruent facial expressions (e.g., cooperative strategy, angry face) exhibited a greater threat response, which arises due to increased uncertainty. Eye fixations also suggest that participants look at the face more in order to acquire information to reconcile their uncertainty in the incongruent condition. Taken together, these results suggest that context matters in the perception of emotion.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Yuqiong; Khooshabeh, Peter; Gratch, Jonathan
Looking Real and Making Mistakes Proceedings Article
In: 13th International Conference on Intelligent Virtual Humans, Edinburgh, Scotland, 2013.
@inproceedings{wang_looking_2013,
  title     = {Looking Real and Making Mistakes},
  author    = {Wang, Yuqiong and Khooshabeh, Peter and Gratch, Jonathan},
  url       = {http://ict.usc.edu/pubs/Looking%20Real%20and%20Making%20Mistakes.pdf},
  year      = {2013},
  date      = {2013-08-01},
  booktitle = {13th International Conference on Intelligent Virtual Humans},
  address   = {Edinburgh, Scotland},
  abstract  = {What happens when a Virtual Human makes mistakes? In this study we investigate the impact of VHs' conversational mistakes in the context of persuasion. The experiment also manipulated the level of photorealism of the VH. Users interacted with a VH that told persuasive information, and they were given the option to use the information to complete a problem-solving task. The VH occasionally made mistakes such as not responding, repeating the same answer, or giving irrelevant feedback. Results indicated that a VH is less persuasive when he or she makes \emph{conversational} mistakes. Individual differences also shed light on the cognitive processes of users who interacted with VH who made conversational errors. Participants with a low Need For Cognition are more effected by the conversational errors. VH photorealism or gender did not have significant effects on the persuasion measure. We discuss the implications of these results with regard to Human-Virtual Human interaction.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Khooshabeh, Peter; Hegarty, Mary; Shipley, Thomas F.
Individual Differences in Mental Rotation Journal Article
In: Experimental Psychology, vol. 59, 2012.
@article{khooshabeh_individual_2012,
  title     = {Individual Differences in Mental Rotation},
  author    = {Khooshabeh, Peter and Hegarty, Mary and Shipley, Thomas F.},
  url       = {http://ict.usc.edu/pubs/Individual%20Differences%20in%20Mental%20Rotation.pdf},
  year      = {2012},
  date      = {2012-11-01},
  journal   = {Experimental Psychology},
  volume    = {59},
  abstract  = {Two experiments tested the hypothesis that imagery ability and figural complexity interact to affect the choice of mental rotation strategies. Participants performed the Shepard and Metzler (1971) mental rotation task. On half of the trials, the 3-D figures were manipulated to create "fragmented" figures, with some cubes missing. Good imagers were less accurate and had longer response times on fragmented figures than on complete figures. Poor imagers performed similarly on fragmented and complete figures. These results suggest that good imagers use holistic mental rotation strategies by default, but switch to alternative strategies depending on task demands, whereas poor imagers are less flexible and use piecemeal strategies regardless of the task demands.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
@article{gervits_classication-based_nodate,
  title     = {A Classification-Based Approach to Automating Human-Robot Dialogue},
  author    = {Gervits, Felix and Leuski, Anton and Bonial, Claire and Gordon, Carla and Traum, David},
  url       = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
  doi       = {10.1007/978-981-15-9323-9_10},
  pages     = {13},
  abstract  = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Filter
Sorry, no publications matched your criteria.