Publications
Search
Traum, David; Henry, Cassidy; Lukin, Stephanie; Artstein, Ron; Gervitz, Felix; Pollard, Kim; Bonial, Claire; Lei, Su; Voss, Clare R.; Marge, Matthew; Hayes, Cory J.; Hill, Susan G.
Dialogue Structure Annotation for Multi-Floor Interaction Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 104–111, ELRA, Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
@inproceedings{traum_dialogue_2018,
title = {Dialogue Structure Annotation for Multi-Floor Interaction},
author = {David Traum and Cassidy Henry and Stephanie Lukin and Ron Artstein and Felix Gervitz and Kim Pollard and Claire Bonial and Su Lei and Clare R. Voss and Matthew Marge and Cory J. Hayes and Susan G. Hill},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/672.html},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {104--111},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {We present an annotation scheme for meso-level dialogue structure, specifically designed for multi-floor dialogue. The scheme includes a transaction unit that clusters utterances from multiple participants and floors into units according to realization of an initiator’s intent, and relations between individual utterances within the unit. We apply this scheme to annotate a corpus of multi-floor human-robot interaction dialogues. We examine the patterns of structure observed in these dialogues and present inter-annotator statistics and relative frequencies of types of relations and transaction units. Finally, some example applications of these annotations are introduced.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen
Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing Proceedings Article
In: Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018, pp. 17–22, ACM Press, Beijing, China, 2018, ISBN: 978-1-4503-6376-1.
@inproceedings{kang_socio-cultural_2018,
title = {Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang},
url = {http://dl.acm.org/citation.cfm?doid=3205326.3205348},
doi = {10.1145/3205326.3205348},
isbn = {978-1-4503-6376-1},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018},
pages = {17--22},
publisher = {ACM Press},
address = {Beijing, China},
abstract = {We explored how users perceive virtual characters that performed the role of a counseling interviewer, while presenting different levels of social class, as well as single or multi-tasking behavior. To investigate this subject, we designed a 2x2 experiment (tasking type and social class of the virtual counseling interviewer). In the experiment, participants experienced the counseling interview interactions over video conferencing on a smartphone. We measured user responses to and perceptions of the virtual human interviewer. The results demonstrate that the tasking types and social class of the virtual counselor affected user responses to and perceptions of the virtual counselor. The results offer insight into the design and development of effective, realistic, and believable virtual human counselors. Furthermore, the results also address current social questions about how smartphones might mediate social interactions, including human-agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Rovira, Ericka; Barnes, Michael J.; Hill, Susan G.
Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams Book Chapter
In: Persuasive Technology, vol. 10809, pp. 56–69, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-78977-4 978-3-319-78978-1.
@incollection{wang_is_2018,
title = {Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Ericka Rovira and Michael J. Barnes and Susan G. Hill},
url = {http://link.springer.com/10.1007/978-3-319-78978-1_5},
doi = {10.1007/978-3-319-78978-1_5},
isbn = {978-3-319-78977-4 978-3-319-78978-1},
year = {2018},
date = {2018-04-01},
booktitle = {Persuasive Technology},
volume = {10809},
pages = {56--69},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Trust is critical to the success of human-robot interaction. Research has shown that people will more accurately trust a robot if they have an accurate understanding of its decision-making process. The Partially Observable Markov Decision Process (POMDP) is one such decision-making process, but its quantitative reasoning is typically opaque to people. This lack of transparency is exacerbated when a robot can learn, making its decision making better, but also less predictable. Recent research has shown promise in calibrating human-robot trust by automatically generating explanations of POMDP-based decisions. In this work, we explore factors that can potentially interact with such explanations in influencing human decision-making in human-robot teams. We focus on explanations with quantitative expressions of uncertainty and experiment with common design factors of a robot: its embodiment and its communication strategy in case of an error. Results help us identify valuable properties and dynamics of the human-robot trust relationship.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Trout, Theron; Russell, Stephen M.; Harrison, Andre V.; Spicer, Ryan; Dennison, Mark S.; Thomas, Jerald; Rosenberg, Evan Suma
Collaborative mixed reality (MxR) and networked decision making Proceedings Article
In: Next-Generation Analyst VI, pp. 21, SPIE, Orlando, Florida, 2018, ISBN: 978-1-5106-1817-6 978-1-5106-1818-3.
@inproceedings{trout_collaborative_2018,
title = {Collaborative mixed reality ({MxR}) and networked decision making},
author = {Theron Trout and Stephen M. Russell and Andre V. Harrison and Ryan Spicer and Mark S. Dennison and Jerald Thomas and Evan Suma Rosenberg},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10653/2309959/Collaborative-mixed-reality-MxR-and-networked-decision-making/10.1117/12.2309959.full},
doi = {10.1117/12.2309959},
isbn = {978-1-5106-1817-6 978-1-5106-1818-3},
year = {2018},
date = {2018-04-01},
booktitle = {Next-Generation Analyst VI},
pages = {21},
publisher = {SPIE},
address = {Orlando, Florida},
abstract = {Collaborative decision-making remains a significant research challenge that is made even more complicated in real-time or tactical problem-contexts. Advances in technology have dramatically assisted the ability for computers and networks to improve the decision-making process (i.e. intelligence, design, and choice). In the intelligence phase of decision making, mixed reality (MxR) has shown a great deal of promise through implementations of simulation and training. However little research has focused on an implementation of MxR to support the entire scope of the decision cycle, let alone collaboratively and in a tactical context. This paper presents a description of the design and initial implementation for the Defense Integrated Collaborative Environment (DICE), an experimental framework for supporting theoretical and empirical research on MxR for tactical decision-making support.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Barnes, Michael J.; Hill, Susan G.
Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, ACM, Chicago, IL, 2018.
@inproceedings{wang_comparing_2018,
title = {Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate},
author = {Ning Wang and David V. Pynadath and Michael J. Barnes and Susan G. Hill},
url = {http://people.ict.usc.edu/~nwang/PDF/HRI-ERS-2018-Wang.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
publisher = {ACM},
address = {Chicago, IL},
abstract = {Trust is critical to the success of human-robot interaction (HRI). Research has shown that people will more accurately trust a robot if they have a more accurate understanding of its decisionmaking process. Recent research has shown promise in calibrating human-agent trust by automatically generating explanations of decision-making process such as POMDP-based ones. In this paper, we compare two automatically generated explanations, one with quantitative information on uncertainty and one based on sensor observations, and study the impact of such explanations on perception of a robot in human-robot team.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Lucas, Gale
Virtual Human Role Players for Studying Social Factors in Organizational Decision Making Journal Article
In: Frontiers in Psychology, vol. 9, 2018, ISSN: 1664-1078.
@article{khooshabeh_virtual_2018,
title = {Virtual Human Role Players for Studying Social Factors in Organizational Decision Making},
author = {Peter Khooshabeh and Gale Lucas},
url = {http://journal.frontiersin.org/article/10.3389/fpsyg.2018.00194/full},
doi = {10.3389/fpsyg.2018.00194},
issn = {1664-1078},
year = {2018},
date = {2018-03-01},
journal = {Frontiers in Psychology},
volume = {9},
pages = {194},
abstract = {The cyber domain of military operations presents many challenges. A unique element is the social dynamic between cyber operators and their leadership because of the novel subject matter expertise involved in conducting technical cyber tasks, so there will be situations where senior leaders might have much less domain knowledge or no experience at all relative to the warfighters who report to them. Nonetheless, it will be important for junior cyber operators to convey convincing information relevant to a mission in order to persuade or influence a leader to make informed decisions. The power dynamic will make it difficult for the junior cyber operator to successfully influence a higher ranking leader. Here we present a perspective with a sketch for research paradigm(s) to study how different factors (normative vs. informational social influence, degree of transparency, and perceived appropriateness of making suggestions) might interact with differential social power dynamics of individuals in cyber decision-making contexts. Finally, we contextualize this theoretical perspective for the research paradigms in viable training technologies.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Weber, René; Mangus, J. Michael; Huskey, Richard; Hopp, Frederic R.; Amir, Ori; Swanson, Reid; Gordon, Andrew; Khooshabeh, Peter; Hahn, Lindsay; Tamborini, Ron
Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions Journal Article
In: Communication Methods and Measures, vol. 12, no. 2-3, pp. 119–139, 2018, ISSN: 1931-2458, 1931-2466.
@article{weber_extracting_2018,
title = {Extracting Latent Moral Information from Text Narratives: Relevance, Challenges, and Solutions},
author = {René Weber and J. Michael Mangus and Richard Huskey and Frederic R. Hopp and Ori Amir and Reid Swanson and Andrew Gordon and Peter Khooshabeh and Lindsay Hahn and Ron Tamborini},
url = {https://www.tandfonline.com/doi/full/10.1080/19312458.2018.1447656},
doi = {10.1080/19312458.2018.1447656},
issn = {1931-2458, 1931-2466},
year = {2018},
date = {2018-03-01},
journal = {Communication Methods and Measures},
volume = {12},
number = {2-3},
pages = {119--139},
abstract = {Moral Foundations Theory (MFT) and the Model of Intuitive Morality and Exemplars (MIME) contend that moral judgments are built on a universal set of basic moral intuitions. A large body of research has supported many of MFT’s and the MIME’s central hypotheses. Yet, an important prerequisite of this research—the ability to extract latent moral content represented in media stimuli with a reliable procedure—has not been systematically studied. In this article, we subject different extraction procedures to rigorous tests, underscore challenges by identifying a range of reliabilities, develop new reliability test and coding procedures employing computational methods, and provide solutions that maximize the reliability and validity of moral intuition extraction. In six content analytical studies, including a large crowd-based study, we demonstrate that: (1) traditional content analytical approaches lead to rather low reliabilities; (2) variation in coding reliabilities can be predicted by both text features and characteristics of the human coders; and (3) reliability is largely unaffected by the detail of coder training. We show that a coding task with simplified training and a coding technique that treats moral foundations as fast, spontaneous intuitions leads to acceptable inter-rater agreement, and potentially to more valid moral intuition extractions. While this study was motivated by issues related to MFT and MIME research, the methods and findings in this study have implications for extracting latent content from text narratives that go beyond moral information. Accordingly, we provide a tool for researchers interested in applying this new approach in their own work.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Mozgai, Sharon; Scherer, Stefan; Woolley, Joshua; Chuang, Brandon
Manual and Automatic Measures Confirm-Intranasal Oxytocin Increases Facial Expressivity Journal Article
In: Affective Computing and Intelligent Interaction, 2017.
@article{neubauer_manual_2017,
title = {Manual and Automatic Measures Confirm-Intranasal Oxytocin Increases Facial Expressivity},
author = {Catherine Neubauer and Sharon Mozgai and Stefan Scherer and Joshua Woolley and Brandon Chuang},
url = {https://www.researchgate.net/publication/321644417_Manual_and_Automatic_Measures_Confirm-Intranasal_Oxytocin_Increases_Facial_Expressivity},
year = {2017},
date = {2017-12-01},
journal = {Affective Computing and Intelligent Interaction},
abstract = {The effects of oxytocin on facial emotional expressivity were investigated in individuals with schizophrenia and age-matched healthy controls during the completion of a Social Judgment Task (SJT) with a double-blind, placebo-controlled, cross-over design. Although pharmacological interventions exist to help alleviate some symptoms of schizophrenia, currently available agents are not effective at improving the severity of blunted facial affect. Participant facial expressivity was previously quantified from video recordings of the SJT using a wellvalidated manual approach (Facial Expression Coding System; FACES). We confirm these findings using an automated computer-based approach. Using both methods we found that the administration of oxytocin significantly increased total facial expressivity in individuals with schizophrenia and increased facial expressivity at trend level in healthy controls. Secondary analysis showed that oxytocin also significantly increased the frequency of negative valence facial expressions in individuals with schizophrenia but not in healthy controls and that oxytocin did not significantly increase positive valence facial expressions in either group. Both manual coding and automatic facial analysis revealed the same pattern of findings. Considering manual annotation can be expensive and timeconsuming, these results suggest that automatic facial analysis may be an efficient and cost-effective alternative to currently utilized manual approaches and may be ready for use in clinical settings.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rosario, Dalton; Borel, Christoph; Conover, Damon; McAlinden, Ryan; Ortiz, Anthony; Shiver, Sarah; Simon, Blair
Small Drone Field Experiment: Data Collection & Processing Journal Article
In: NATO SET-241 Symposium, 2017.
@article{rosario_small_2017,
title = {Small Drone Field Experiment: Data Collection \& Processing},
author = {Dalton Rosario and Christoph Borel and Damon Conover and Ryan McAlinden and Anthony Ortiz and Sarah Shiver and Blair Simon},
url = {https://arxiv.org/abs/1711.10693},
year = {2017},
date = {2017-11-01},
journal = {NATO SET-241 Symposium},
abstract = {Following an initiative formalized in April 2016—formally known as ARL West—between the U.S. Army Research Laboratory (ARL) and University of Southern California’s Institute for Creative Technologies (USC ICT), a field experiment was coordinated and executed in the summer of 2016 by ARL, USC ICT, and Headwall Photonics. The purpose was to image part of the USC main campus in Los Angeles, USA, using two portable COTS (commercial off the shelf) aerial drone solutions for data acquisition, for photogrammetry (3D reconstruction from images), and fusion of hyperspectral data with the recovered set of 3D point clouds representing the target area. The research aims for determining the viability of having a machine capable of segmenting the target area into key material classes (e.g., manmade structures, live vegetation, water) for use in multiple purposes, to include providing the user with a more accurate scene understanding and enabling the unsupervised automatic sampling of meaningful material classes from the target area for adaptive semi-supervised machine learning. In the latter, a target-set library may be used for automatic machine training with data of local material classes, as an example, to increase the prediction chances of machines recognizing targets. The field experiment and associated data post processing approach to correct for reflectance, geo-rectify, recover the area’s dense point clouds from images, register spectral with elevation properties of scene surfaces from the independently collected datasets, and generate the desired scene segmented maps are discussed. Lessons learned from the experience are also highlighted throughout the paper.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Swanson, Reid William; Gordon, Andrew S.; Khooshabeh, Peter; Sagae, Kenji; Huskey, Richard; Mangus, Michael; Amir, Ori; Weber, René
An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures Journal Article
In: Dialogue & Discourse, vol. 8, no. 2, pp. 105–128, 2017.
@article{swanson_empirical_2017,
title = {An Empirical Analysis of Subjectivity and Narrative Levels in Weblog Storytelling Across Cultures},
author = {Reid William Swanson and Andrew S. Gordon and Peter Khooshabeh and Kenji Sagae and Richard Huskey and Michael Mangus and Ori Amir and René Weber},
url = {https://www.researchgate.net/publication/321170929_An_Empirical_Analysis_of_Subjectivity_and_Narrative_Levels_in_Personal_Weblog_Storytelling_Across_Cultures},
doi = {10.5087/dad.2017.205},
year = {2017},
date = {2017-11-01},
journal = {Dialogue \& Discourse},
volume = {8},
number = {2},
pages = {105--128},
abstract = {Storytelling is a universal activity, but the way in which discourse structure is used to persuasively convey ideas and emotions may depend on cultural factors. Because first-person accounts of life experiences can have a powerful impact in how a person is perceived, the storyteller may instinctively employ specific strategies to shape the audience’s perception. Hypothesizing that some of the differences in storytelling can be captured by the use of narrative levels and subjectivity, we analyzed over one thousand narratives taken from personal weblogs. First, we compared stories from three different cultures written in their native languages: English, Chinese and Farsi. Second, we examined the impact of these two discourse properties on a reader’s attitude and behavior toward the narrator. We found surprising similarities and differences in how stories are structured along these two dimensions across cultures. These discourse properties have a small but significant impact on a reader’s behavioral response toward the narrator.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Chollet, Mathieu; Mozgai, Sharon; Dennison, Mark; Khooshabeh, Peter; Scherer, Stefan
The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task Proceedings Article
In: Proceedings of the 19th ACM International Conference on Multimodal Interaction, pp. 426–432, ACM Press, Glasgow, UK, 2017, ISBN: 978-1-4503-5543-8.
@inproceedings{neubauer_relationship_2017,
title = {The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task},
author = {Catherine Neubauer and Mathieu Chollet and Sharon Mozgai and Mark Dennison and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=3136755.3136804},
doi = {10.1145/3136755.3136804},
isbn = {978-1-4503-5543-8},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the 19th ACM International Conference on Multimodal Interaction},
pages = {426--432},
publisher = {ACM Press},
address = {Glasgow, UK},
abstract = {It is commonly known that a relationship exists between the human voice and various emotional states. Past studies have demonstrated changes in a number of vocal features, such as fundamental frequency f0 and peakSlope, as a result of varying emotional state. These voice characteristics have been shown to relate to emotional load, vocal tension, and, in particular, stress. Although much research exists in the domain of voice analysis, few studies have assessed the relationship between stress and changes in the voice during a dyadic team interaction. The aim of the present study was to investigate the multimodal interplay between speech and physiology during a high-workload, high-stress team task. Specifically, we studied task-induced effects on participants' vocal signals, specifically, the f0 and peakSlope features, as well as participants' physiology, through cardiovascular measures. Further, we assessed the relationship between physiological states related to stress and changes in the speaker's voice. We recruited participants with the specific goal of working together to diffuse a simulated bomb. Half of our sample participated in an "Ice Breaker" scenario, during which they were allowed to converse and familiarize themselves with their teammate prior to the task, while the other half of the sample served as our "Control". Fundamental frequency (f0), peakSlope, physiological state, and subjective stress were measured during the task. Results indicated that f0 and peakSlope significantly increased from the beginning to the end of each task trial, and were highest in the last trial, which indicates an increase in emotional load and vocal tension. Finally, cardiovascular measures of stress indicated that the vocal and emotional load of speakers towards the end of the task mirrored a physiological state of psychological "threat".},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Foots, Ashley; Hayes, Cory; Henry, Cassidy; Pollard, Kimberly; Artstein, Ron; Voss, Clare; Traum, David
Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task Proceedings Article
In: Proceedings of the First Workshop on Language Grounding for Robotics, pp. 58–66, Association for Computational Linguistics, Vancouver, Canada, 2017.
@inproceedings{marge_exploring_2017,
title = {Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task},
author = {Matthew Marge and Claire Bonial and Ashley Foots and Cory Hayes and Cassidy Henry and Kimberly Pollard and Ron Artstein and Clare Voss and David Traum},
url = {http://www.aclweb.org/anthology/W17-2808},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the First Workshop on Language Grounding for Robotics},
pages = {58--66},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Robot-directed communication is variable, and may change based on human perception of robot capabilities. To collect training data for a dialogue system and to investigate possible communication changes over time, we developed a Wizard-of-Oz study that (a) simulates a robot’s limited understanding, and (b) collects dialogues where human participants build a progressively better mental model of the robot’s understanding. With ten participants, we collected ten hours of human-robot dialogue. We analyzed the structure of instructions that participants gave to a remote robot before it responded. Our findings show a general initial preference for including metric information (e.g., move forward 3 feet) over landmarks (e.g., move to the desk) in motion commands, but this decreased over time, suggesting changes in perception.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.; Merchant, Chirag
The Dynamics of Human-Agent Trust with POMDP-Generated Explanations Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents (IVA 2017), Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{wang_dynamics_2017,
title = {The Dynamics of Human-Agent Trust with {POMDP}-Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill and Chirag Merchant},
url = {https://link.springer.com/chapter/10.1007/978-3-319-67401-8_58},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents (IVA 2017)},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {Partially Observable Markov Decision Processes (POMDPs) enable optimized decision making by robots, agents, and other autonomous systems. This quantitative optimization can also be a limitation in human-agent interaction, as the resulting autonomous behavior, while possibly optimal, is often impenetrable to human teammates, leading to improper trust and, subsequently, disuse or misuse of such systems [1]. Automatically generated explanations of POMDP-based decisions have shown promise in calibrating human-agent trust [3]. However, these “one-size-fits-all” static explanation policies are insufficient to accommodate different communication preferences across people. In this work, we analyze human behavior in a human-robot interaction (HRI) scenario, to find behavioral indicators of trust in the agent’s ability. We evaluate four hypothesized behavioral measures that an agent could potentially use to dynamically infer its teammate’s current trust level. The conclusions drawn can potentially inform the design of intelligent agents that can automatically adapt their explanation policies as they observe the behavioral responses of their human teammates.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Social decisions and fairness change when people’s interests are represented by autonomous agents Journal Article
In: Autonomous Agents and Multi-Agent Systems, pp. 163–187, 2017, ISSN: 1387-2532, 1573-7454.
@article{de_melo_social_2017,
title = {Social decisions and fairness change when people’s interests are represented by autonomous agents},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s10458-017-9376-6},
doi = {10.1007/s10458-017-9376-6},
issn = {1387-2532, 1573-7454},
year = {2017},
date = {2017-07-01},
journal = {Autonomous Agents and Multi-Agent Systems},
pages = {163--187},
abstract = {There has been growing interest on agents that represent people’s interests or act on their behalf such as automated negotiators, self-driving cars, or drones. Even though people will interact often with others via these agent representatives, little is known about whether people’s behavior changes when acting through these agents, when compared to direct interaction with others. Here we show that people’s decisions will change in important ways because of these agents; specifically, we showed that interacting via agents is likely to lead people to behave more fairly, when compared to direct interaction with others. We argue this occurs because programming an agent leads people to adopt a broader perspective, consider the other side’s position, and rely on social norms—such as fairness—to guide their decision making. To support this argument, we present four experiments: in Experiment 1 we show that people made fairer offers in the ultimatum and impunity games when interacting via agent representatives, when compared to direct interaction; in Experiment 2, participants were less likely to accept unfair offers in these games when agent representatives were involved; in Experiment 3, we show that the act of thinking about the decisions ahead of time—i.e., under the so-called “strategy method”—can also lead to increased fairness, even when no agents are involved; and, finally, in Experiment 4 we show that participants were less likely to reach an agreement with unfair counterparts in a negotiation setting.We discuss theoretical implications for our understanding of the nature of people’s social behavior with agent representatives, as well as practical implications for the design of agents that have the potential to increase fairness in society.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Henry, Cassidy; Moolchandani, Pooja; Pollard, Kimberly A.; Bonial, Claire; Foots, Ashley; Artstein, Ron; Hayes, Cory; Voss, Clare R.; Traum, David; Marge, Matthew
Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World Proceedings Article
In: Proceedings of the WiNLP workshop, Vancouver, Canada, 2017.
@inproceedings{cassidy_towards_2017,
title = {Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World},
author = {Cassidy Henry and Pooja Moolchandani and Kimberly A. Pollard and Claire Bonial and Ashley Foots and Ron Artstein and Cory Hayes and Clare R. Voss and David Traum and Matthew Marge},
url = {http://www.winlp.org/wp-content/uploads/2017/final_papers_2017/52_Paper.pdf},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the WiNLP workshop},
address = {Vancouver, Canada},
abstract = {Our research aims to develop a natural dialogue interface between robots and humans. We describe two focused efforts to increase data collection efficiency towards this end: creation of an annotated corpus of interaction data, and a robot simulation, allowing greater flexibility in when and where we can run experiments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Conover, Damon M.; Beidleman, Brittany; McAlinden, Ryan; Borel-Donohue, Christoph C.
Visualizing UAS-Collected Imagery Using Augmented Reality Proceedings Article
In: Proceedings of the Next-Generation Analyst V conference, pp. 102070C, SPIE, Anaheim, CA, 2017.
@inproceedings{conover_visualizing_2017,
  author    = {Conover, Damon M. and Beidleman, Brittany and McAlinden, Ryan and Borel-Donohue, Christoph C.},
  title     = {Visualizing UAS-Collected Imagery Using Augmented Reality},
  booktitle = {Proceedings of the Next-Generation Analyst V conference},
  pages     = {102070C},
  publisher = {SPIE},
  address   = {Anaheim, CA},
  year      = {2017},
  date      = {2017-05-01},
  url       = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2262864},
  doi       = {10.1117/12.2262864},
  abstract  = {One of the areas where augmented reality will have an impact is in the visualization of 3-D data. 3-D data has traditionally been viewed on a 2-D screen, which has limited its utility. Augmented reality head-mounted displays, such as the Microsoft HoloLens, make it possible to view 3-D data overlaid on the real world. This allows a user to view and interact with the data in ways similar to how they would interact with a physical 3-D object, such as moving, rotating, or walking around it. A type of 3-D data that is particularly useful for military applications is geo-specific 3-D terrain data, and the visualization of this data is critical for training, mission planning, intelligence, and improved situational awareness. Advances in Unmanned Aerial Systems (UAS), photogrammetry software, and rendering hardware have drastically reduced the technological and financial obstacles in collecting aerial imagery and in generating 3-D terrain maps from that imagery. Because of this, there is an increased need to develop new tools for the exploitation of 3-D data. We will demonstrate how the HoloLens can be used as a tool for visualizing 3-D terrain data. We will describe: 1) how UAS-collected imagery is used to create 3-D terrain maps, 2) how those maps are deployed to the HoloLens, 3) how a user can view and manipulate the maps, and 4) how multiple users can view the same virtual 3-D object at the same time.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Berkiten, Sema; Halber, Maciej; Solomon, Justin; Ma, Chongyang; Li, Hao; Rusinkiewicz, Szymon
Learning Detail Transfer based on Geometric Features Journal Article
In: Computer Graphics Forum, vol. 36, no. 2, pp. 361–373, 2017, ISSN: 0167-7055.
@article{berkiten_learning_2017,
title = {Learning Detail Transfer based on Geometric Features},
author = {Berkiten, Sema and Halber, Maciej and Solomon, Justin and Ma, Chongyang and Li, Hao and Rusinkiewicz, Szymon},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.13132/full},
doi = {10.1111/cgf.13132},
issn = {0167-7055},
year = {2017},
date = {2017-05-01},
journal = {Computer Graphics Forum},
volume = {36},
number = {2},
pages = {361--373},
abstract = {The visual richness of computer graphics applications is frequently limited by the difficulty of obtaining high-quality, detailed 3D models. This paper proposes a method for realistically transferring details (specifically, displacement maps) from existing high-quality 3D models to simple shapes that may be created with easy-to-learn modeling tools. Our key insight is to use metric learning to find a combination of geometric features that successfully predicts detail-map similarities on the source mesh; we use the learned feature combination to drive the detail transfer. The latter uses a variant of multi-resolution non-parametric texture synthesis, augmented by a high-frequency detail transfer step in texture space. We demonstrate that our technique can successfully transfer details among a variety of shapes including furniture and clothing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Spicer, Ryan P.; Russell, Stephen M.; Rosenberg, Evan Suma
The mixed reality of things: emerging challenges for human-information interaction Proceedings Article
In: Proceedings Volume 10207, Next-Generation Analyst V, SPIE, Anaheim, CA, 2017.
@inproceedings{spicer_mixed_2017,
title = {The Mixed Reality of Things: Emerging Challenges for Human-Information Interaction},
author = {Spicer, Ryan P. and Russell, Stephen M. and Rosenberg, Evan Suma},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2268004},
doi = {10.1117/12.2268004},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings Volume 10207, Next-Generation Analyst V},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {Virtual and mixed reality technology has advanced tremendously over the past several years. This nascent medium has the potential to transform how people communicate over distance, train for unfamiliar tasks, operate in challenging environments, and how they visualize, interact, and make decisions based on complex data. At the same time, the marketplace has experienced a proliferation of network-connected devices and generalized sensors that are becoming increasingly accessible and ubiquitous. As the "Internet of Things" expands to encompass a predicted 50 billion connected devices by 2020, the volume and complexity of information generated in pervasive and virtualized environments will continue to grow exponentially. The convergence of these trends demands a theoretically grounded research agenda that can address emerging challenges for human-information interaction (HII). Virtual and mixed reality environments can provide controlled settings where HII phenomena can be observed and measured, new theories developed, and novel algorithms and interaction techniques evaluated. In this paper, we describe the intersection of pervasive computing with virtual and mixed reality, identify current research gaps and opportunities to advance the fundamental understanding of HII, and discuss implications for the design and development of cyber-human systems for both military and civilian use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Neubauer, Catherine; Scherer, Stefan
The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment Proceedings Article
In: Proceedings of the Preconference on Affective Computing at the Society for Affective Science, Boston, MA, 2017.
@inproceedings{neubauer_effects_2017,
title = {The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment},
author = {Neubauer, Catherine and Scherer, Stefan},
url = {http://ict.usc.edu/pubs/The%20Effects%20of%20Pre-task%20Team%20Collaboration%20on%20Facial%20Expression%20and%20Speech%20Entrainment.pdf},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the Preconference on Affective Computing at the Society for Affective Science},
address = {Boston, MA},
abstract = {Many everyday tasks are complex and require the coordination of one or more individuals. Such tasks can be relatively simple like passing a ball to a friend during a game of catch, while others are more complex such as performing a life-saving surgery where surgeons, anesthesiologists and nurses all work together in a multi-person team [1]. Such coordination requires the appropriate allocation of cognitive and behavioral effort to meet the changing demands of their environment and cannot be completed alone [1]. These mutually cooperative behaviors can include team communication, body position and even affective cues [2]. Some behaviors are explicitly controlled to be coordinated [3] (e.g., when an individual purposely attempts to follow the behaviors of their teammate or team leader), while others are implicit or unconscious. Presently, these shared behaviors have been referred to as entrainment [4] [5], mimicry [6] [7] and even action matching [8] [9]; however, the specific term used typically refers to the underlying theoretical cause for the phenomenon. Theoretically, entrainment can be explained as the spontaneous interpersonal coupling that occurs because the behavior of one or more individuals is affected by another’s behavior in a closed loop system. Additionally, such behavior is typically evident when working on a mutual, goal-directed task [10]. Therefore, for the purposes of this paper we will refer to the cooperative behaviors between teammates that support problem solving as entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen; Amir, Ori; Lin, Rebecca
Social influence of humor in virtual human counselor's self-disclosure Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 1546-4261.
@article{kang_social_2017,
title = {Social influence of humor in virtual human counselor's self-disclosure},
author = {Kang, Sin-Hwa and Krum, David M. and Khooshabeh, Peter and Phan, Thai and Chang, Chien-Yen and Amir, Ori and Lin, Rebecca},
url = {http://doi.wiley.com/10.1002/cav.1763},
doi = {10.1002/cav.1763},
issn = {1546-4261},
year = {2017},
date = {2017-04-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {We explored the social influence of humor in a virtual human counselor's self-disclosure while also varying the ethnicity of the virtual counselor. In a 2 × 3 experiment (humor and ethnicity of the virtual human counselor), participants experienced counseling interview interactions via Skype on a smartphone. We measured user responses to and perceptions of the virtual human counselor. The results demonstrate that humor positively affects user responses to and perceptions of a virtual counselor. The results further suggest that matching styles of humor with a virtual counselor's ethnicity influences user responses and perceptions. The results offer insight into the effective design and development of realistic and believable virtual human counselors. Furthermore, they illuminate the potential use of humor to enhance self-disclosure in human-agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Filter
2015
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Building Trust in a Human-Robot Team with Automatically Generated Explanations Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation
@inproceedings{wang_building_2015,
title = {Building Trust in a Human-Robot Team with Automatically Generated Explanations},
author = {Wang, Ning and Pynadath, David V. and Hill, Susan G.},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Technological advances offer the promise of robotic systems that work with people to form human-robot teams that are more capable than their individual members. Unfortunately, the increasing capability of such autonomous systems has often failed to increase the capability of the human-robot team. Studies have identified many causes underlying these failures, but one critical aspect of a successful human-machine interaction is trust. When robots are more suited than humans for a certain task, we want the humans to trust the robots to perform that task. When the robots are less suited, we want the humans to appropriately gauge the robots’ ability and have people perform the task manually. Failure to do so results in disuse of robots in the former case and misuse in the latter. Real-world case studies and laboratory experiments show that failures in both cases are common. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies show that explanations offered by an automated system can help maintain trust with the humans in case the system makes an error, indicating that the robot’s communication transparency can be an important factor in earning an appropriate level of trust. To study how robots can communicate their decision-making process to humans, we have designed an agent-based online test-bed that supports virtual simulation of domain-independent human-robot interaction. In the simulation, humans work together with virtual robots as a team. The test-bed allows researchers to conduct online human-subject studies and gain better understanding of how robot communication can improve human-robot team performance by fostering better trust relationships between humans and their robot teammates. In this paper, we describe the details of our design, and illustrate its operation with an example human-robot team reconnaissance task.},
keywords = {ARL, DoD, Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Scherer, Stefan; Ouimette, Brett; Ryan, William S.; Lance, Brent J.; Gratch, Jonathan
Computational-based behavior analysis and peripheral psychophysiology Journal Article
In: Advances in Computational Psychophysiology, pp. 34–36, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{khooshabeh_computational-based_2015,
title = {Computational-based behavior analysis and peripheral psychophysiology},
author = {Khooshabeh, Peter and Scherer, Stefan and Ouimette, Brett and Ryan, William S. and Lance, Brent J. and Gratch, Jonathan},
url = {http://www.sciencemag.org/sites/default/files/custom-publishing/documents/CP_Supplement_Final_100215.pdf},
year = {2015},
date = {2015-10-01},
journal = {Advances in Computational Psychophysiology},
pages = {34--36},
abstract = {Computational-based behavior analysis aims to automatically identify, characterize, model, and synthesize multimodal nonverbal behavior within both human–machine as well as machine-mediated human–human interaction. It uses state-of-the-art machine learning algorithms to track human nonverbal and verbal information, such as facial expressions, gestures, and posture, as well as what and how a person speaks. The emerging technology from this field of research is relevant for a wide range of interactive and social applications, including health care and education. The characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or posttraumatic stress, could have significant benefits for treatments and the overall efficiency of the health care system.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Spicer, Ryan; Evangelista, Edgar; New, Raymond; Campbell, Julia; Richmond, Todd; McGroarty, Christopher; Vogt, Brian
Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping Proceedings Article
In: Proceedings of the 2015 Simulation Interoperability Workshop, Orlando, FL, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{spicer_innovation_2015,
title = {Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping},
author = {Spicer, Ryan and Evangelista, Edgar and New, Raymond and Campbell, Julia and Richmond, Todd and McGroarty, Christopher and Vogt, Brian},
url = {http://ict.usc.edu/pubs/Innovation%20and%20Rapid%20Evolutionary%20Design%20by%20Virtual%20Doing-Understanding%20Early%20Synthetic.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of the 2015 Simulation Interoperability Workshop},
address = {Orlando, FL},
abstract = {The proliferation and maturation of tools supporting virtual environments combined with emerging immersive capabilities (e.g. Oculus Rift and other head mounted displays) point towards the ability to take nascent ideas and realize them in engaging ways through an Early Synthetic Prototyping (ESP) system. In effect, “bend electrons before bending metal,” enabling Soldier (end-user) feedback early in the design process, while fostering an atmosphere of collaboration and innovation. Simulation has been used in a variety of ways for concept, design, and testing, but current methods do not put the user into the system in ways that provide deep feedback and enable a dialogue between Warfighter and Engineer (as well as other stakeholders) that can inform design. This paper will discuss how the process of ESP is teased out by using iterative rapid virtual prototyping based on an initial ESP schema, resulting in a rather organic design process – Innovation and Rapid Evolutionary Design by Virtual Doing. By employing canonical use cases, working through the draft schema allows the system to help design itself and inform the process evolution. This type of self-referential meta-design becomes increasingly powerful and relevant given the ability to rapidly create assets, capabilities and environments that immerse developers, stakeholders, and end users early and often in the process. Specific examples of using rapid virtual prototyping for teasing out the design and implications/applications of ESP will be presented, walking through the evolution of both schema and prototypes with specific use cases. In addition, this paper will cover more generalized concepts, approaches, analytics, and lessons-learned as well as implications for innovation throughout research, development, and industry.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark G.; Goldberg, Benjamin S.
Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 3, pp. 303–318, U.S. Army Research Laboratory, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences, UARC
@incollection{lane_lowering_2015,
title = {Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools},
author = {Lane, H. Chad and Core, Mark G. and Goldberg, Benjamin S.},
url = {http://ict.usc.edu/pubs/Lowering%20the%20Technical%20Skill%20Requirements%20for%20Building%20Intelligent%20Tutors-A%20Review%20of%20Authoring%20Tools.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {3},
pages = {303--318},
publisher = {U.S. Army Research Laboratory},
abstract = {In this chapter, we focus on intelligent tutoring systems (ITSs), an instance of educational technology that is often criticized for not reaching its full potential (Nye, 2013). Researchers have debated why, given such strong empirical evidence in their favor (Anderson, Corbett, Koedinger & Pelletier, 1995; D’Mello & Graesser, 2012; VanLehn et al., 2005; Woolf, 2009), intelligent tutors are not in every classroom, on every device, providing educators with fine-grained assessment information about their students. Although many factors contribute to a lack of adoption (Nye, 2014), one widely agreed upon reason behind slow adoption and poor scalability of ITSs is that the engineering demands are simply too great. This is no surprise given that the effectiveness of ITSs is often attributable to the use of rich knowledge representations and cognitively plausible models of domain knowledge (Mark & Greer, 1995; Valerie J. Shute & Psotka, 1996; VanLehn, 2006; Woolf, 2009), which are inherently burdensome to build. To put it another way: the features that tend to make ITSs effective are also the hardest to build. The heavy reliance on cognitive scientists and artificial intelligence (AI) software engineers seems to be a bottleneck.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Goldberg, Ben; Hu, Xiangen
Generalizing the Genres for ITS: Authoring Considerations for Representative Learning Tasks Book Section
In: Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Brawner, Keith (Ed.): Design Recommendations for Intelligent Tutoring Systems: Volume 2: Authoring Tools and Expert Modeling Techniques, vol. 3, pp. 47–63, U.S. Army Research Laboratory, 2015, ISBN: 978-0-9893923-7-2.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences
@incollection{nye_generalizing_2015,
title = {Generalizing the Genres for {ITS}: Authoring Considerations for Representative Learning Tasks},
author = {Nye, Benjamin D. and Goldberg, Ben and Hu, Xiangen},
editor = {Sottilare, Robert A. and Graesser, Arthur C. and Hu, Xiangen and Brawner, Keith},
url = {http://ict.usc.edu/pubs/Generalizing%20the%20Genres%20for%20ITS%20-%20Authoring%20Considerations%20for%20Representative%20Learning%20Tasks.pdf},
isbn = {978-0-9893923-7-2},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 2: Authoring Tools and Expert Modeling Techniques},
volume = {3},
pages = {47--63},
publisher = {U.S. Army Research Laboratory},
abstract = {Compared to many other learning technologies, intelligent tutoring systems (ITSs) have a distinct challenge: authoring an adaptive inner loop that provides pedagogical support on one or more learning tasks. This coupling of tutoring behavior to student interaction with a learning task means that authoring tools need to reflect both the learning task and the ITS pedagogy. To explore this issue, common learning activities in intelligent tutoring need to be categorized and analyzed for the information that is required to tutor each task. The types of learning activities considered cover a large range: step-by-step problem solving, bug repair, building generative functions (e.g., computer code), structured argumentation, self-reflection, short question answering, essay writing, classification, semantic matching, representation mapping (e.g., graph to equation), concept map revision, choice scenarios, simulated process scenarios, motor skills practice, collaborative discussion, collaborative design, and team coordination tasks. These different tasks imply a need for different authoring tools and processes used to create tutoring systems for each task. In this chapter, we consider three facets of authoring: 1) the minimum information required to create the task, 2) the minimum information needed to implement common pedagogical strategies, 3) the expertise required for each type of information. The goal of this analysis is to present a roadmap of effective practices in authoring tool interfaces for each tutoring task considered. A long-term vision for ITSs is to have generalizable authoring tools, which could be used to rapidly create content for a variety of ITSs. However, it is as-yet unclear if this goal is even attainable. Authoring tools have a number of serious challenges, from the standpoint of generalizability. These challenges include the domain, the data format, and the author. 
First, different ITS domains require different sets of authoring tools, because they have different learning tasks. Tools that are convenient for embedding tutoring in a 3D virtual world are completely different than ones that make it convenient to add tutoring to a system for practicing essay-writing, for example. Second, the data produced by an authoring tool needs to be consumed by an ITS that will make pedagogical decisions. As such, at least some of the data is specific to the pedagogy of the ITS, rather than directly reflecting domain content. As a simple example, if an ITS uses text hints, those hints need to be authored, but some systems may just highlight errors rather than providing text hints. As such, the first system actually needs more content authored and represented as data. With that said, typical ITSs use a relatively small and uniform set of authored content to interact with learners, such as correctness feedback, corrections, and hints (VanLehn, 2006). Third, different authors may need different tools (Nye, Rahman, Yang, Hays, Cai, Graesser, & Hu, 2014). This means that even the same content may need distinct authoring tools that match the expertise of different authors. In this chapter, we are focusing primarily on the first challenge: differences in domains. In particular, our stance is that the “content domain” is too coarse-grained to allow much reuse between authoring tools. This is because, to a significant extent, content domains are simply names for related content. However, the skills and pedagogy for the same domain can vary drastically across different topics and expertise levels. For example, Algebra and Geometry are both high-school level math domains. However, in geometry, graphical depictions (e.g., shapes, angles) are a central aspect of the pedagogy, while Algebra tends to use graphics very differently (e.g., coordinate plots). 
As such, some learning tasks tend to be shared between those subdomains (e.g., equation-solving) and other tasks are not (e.g., classifying shapes). This raises the central point of our paper: the learning tasks for a domain define how we author content for that domain. For example, while Algebra does not involve recognizing many shapes, understanding the elements of architecture involves recognizing a variety of basic and advanced shapes and forms. In total, this means that no single whole-cloth authoring tool will work well for any pair of Algebra, Geometry, and Architectural Forms. However, it also implies that a reasonable number of task-specific tools for each learning task might allow authoring for all three domains. To do this, we need to understand the common learning tasks for domains taught using ITS, and why those tasks are applied to those domains. In the following sections, we identify and categorize common learning tasks for different ITS domains. Then, we extract common principles for those learning tasks. Finally, we suggest a set of general learning activities that might be used to tutor a large number of domains.},
keywords = {ARL, DoD, Learning Sciences},
pubstate = {published},
tppubtype = {incollection}
}
Rizzo, Albert; Cukor, Judith; Gerardi, Maryrose; Alley, Stephanie; Reist, Chris; Roy, Mike; Rothbaum, Barbara O.; Difede, JoAnn
Virtual Reality Exposure for PTSD Due to Military Combat and Terrorist Attacks Journal Article
In: Journal of Contemporary Psychotherapy, pp. 1–10, 2015, ISSN: 0022-0116, 1573-3564.
Abstract | Links | BibTeX | Tags: DoD, MedVR
@article{rizzo_virtual_2015,
title = {Virtual Reality Exposure for PTSD Due to Military Combat and Terrorist Attacks},
author = {Rizzo, Albert and Cukor, Judith and Gerardi, Maryrose and Alley, Stephanie and Reist, Chris and Roy, Mike and Rothbaum, Barbara O. and Difede, JoAnn},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Exposure%20for%20PTSD%20Due%20to%20Military%20Combat%20and%20Terrorist%20Attacks.pdf},
doi = {10.1007/s10879-015-9306-3},
issn = {0022-0116, 1573-3564},
year = {2015},
date = {2015-05-01},
journal = {Journal of Contemporary Psychotherapy},
pages = {1--10},
abstract = {Humans exposed to war and terrorist attacks are at risk for the development of posttraumatic stress disorder (PTSD). Numerous reports indicate that the incidence of PTSD in both returning Operation Enduring Freedom/Operation Iraqi Freedom (OEF/OIF) military personnel and survivors of the 9/11 World Trade Center (WTC) attacks is significant. These situations have served to motivate research on how to better develop and disseminate evidence-based treatments for PTSD and other related psychosocial conditions. Virtual reality (VR) delivered exposure therapy for PTSD is currently being used to treat combat and terrorist attack related PTSD with initial reports of positive outcomes. This paper presents an overview and rationale for the use of VR exposure therapy with anxiety disorders and PTSD and describes the status of two systems (Virtual Iraq/Afghanistan and Virtual World Trade Center) developed for this purpose.},
keywords = {DoD, MedVR},
pubstate = {published},
tppubtype = {article}
}
Andreatta, Pamela; Klotz, Jessica J.; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas B.
Outcomes From Two Forms of Training for First-Responder Competency in Cholinergic Crisis Management Journal Article
In: Military Medicine, vol. 180, no. 4, pp. 468–474, 2015, ISSN: 0026-4075, 1930-613X.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@article{andreatta_outcomes_2015,
title = {Outcomes From Two Forms of Training for First-Responder Competency in Cholinergic Crisis Management},
author = {Andreatta, Pamela and Klotz, Jessica J. and Madsen, James M. and Hurst, Charles G. and Talbot, Thomas B.},
url = {http://ict.usc.edu/pubs/Outcomes%20From%20Two%20Forms%20of%20Training%20for%20First-Responder%20Competency%20in%20Cholinergic%20Crisis%20Management.pdf},
doi = {10.7205/MILMED-D-14-00290},
issn = {0026-4075, 1930-613X},
year = {2015},
date = {2015-04-01},
journal = {Military Medicine},
volume = {180},
number = {4},
pages = {468--474},
abstract = {Military and civilian first responders must be able to recognize and effectively manage mass disaster casualties. Clinical management of injuries resulting from nerve agents provides different challenges for first responders than those of conventional weapons. We evaluated the impact of a mixed-methods training program on competency acquisition in cholinergic crisis clinical management using multimedia with either live animal or patient actor examples, and hands-on practice using SimMan3G mannequin simulators. A purposively selected sample of 204 civilian and military first responders who had not previously completed nerve agent training were assessed pre- and post-training for knowledge, performance, self-efficacy, and affective state. We conducted analysis of variance with repeated measures; statistical significance p $<$ 0.05. Both groups had significant performance improvement across all assessment dimensions: knowledge $>$ 20\%, performance $>$ 50\%, self-efficacy $>$ 34\%, and affective state $>$ 15\%. There were no significant differences between the live animal and patient actor groups. These findings could aid in the specification of training for first-responder personnel in military and civilian service. Although less comprehensive than U.S. Army Medical Research Institute of Chemical Defense courses, the training outcomes associated with this easily distributed program demonstrate its value in increasing the competency of first responders in recognizing and managing a mass casualty cholinergic event.},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Choi, Ahyoung; Melo, Celso M.; Khooshabeh, Peter; Woo, Woontack; Gratch, Jonathan
Physiological evidence for a dual process model of the social effects of emotion in computers Journal Article
In: International Journal of Human-Computer Studies, vol. 74, pp. 41–53, 2015, ISSN: 1071-5819.
Abstract | Links | BibTeX | Tags: ARL, DoD, Virtual Humans
@article{choi_physiological_2015,
title = {Physiological evidence for a dual process model of the social effects of emotion in computers},
author = {Choi, Ahyoung and Melo, Celso M. and Khooshabeh, Peter and Woo, Woontack and Gratch, Jonathan},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581914001414},
doi = {10.1016/j.ijhcs.2014.10.006},
issn = {1071-5819},
year = {2015},
date = {2015-02-01},
journal = {International Journal of Human-Computer Studies},
volume = {74},
pages = {41--53},
abstract = {There has been recent interest on the impact of emotional expressions of computers on people's decision making. However, despite a growing body of empirical work, the mechanism underlying such effects is still not clearly understood. To address this issue the paper explores two kinds of processes studied by emotion theorists in human-human interaction: inferential processes, whereby people retrieve information from emotion expressions about other's beliefs, desires, and intentions; affective processes, whereby emotion expressions evoke emotions in others, which then influence their decisions. To tease apart these two processes as they occur in human-computer interaction, we looked at physiological measures (electrodermal activity and heart rate deceleration). We present two experiments where participants engaged in social dilemmas with embodied agents that expressed emotion. Our results show, first, that people's decisions were influenced by affective and cognitive processes and, according to the prevailing process, people behaved differently and formed contrasting subjective ratings of the agents; second we show that an individual trait known as electrodermal lability, which measures people's physiological sensitivity, predicted the extent to which affective or inferential processes dominated the interaction. We discuss implications for the design of embodied agents and decision making systems that use emotion expression to enhance interaction between humans and computers.},
keywords = {ARL, DoD, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2014
Andreatta, Pamela; Klotz, Jessica; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas
Assessment instrument validation for critical clinical competencies - pediatric-neonatal intubation and cholinergic crisis management Proceedings Article
In: Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014, Orlando, FL, 2014.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@inproceedings{andreatta_assessment_2014,
title = {Assessment instrument validation for critical clinical competencies - pediatric-neonatal intubation and cholinergic crisis management},
author = {Pamela Andreatta and Jessica Klotz and James M. Madsen and Charles G. Hurst and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Assessment%20instrument%20validation%20for%20critical%20clinical%20competencies%20-%20pediatricneonatal%20intubation%20and%20cholinergic%20crisis%20management.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014},
address = {Orlando, FL},
abstract = {Military and civilian first-responders must be able to recognize and effectively manage casualties that necessitate immediate application of critical clinical competencies. Two examples of these critical competencies are the clinical management of injuries resulting from nerve agents and difficult intubation, especially for pediatric or neonatal patients. The opportunity to learn and practice the necessary skills for these rare, but urgent, situations is complicated by the limited ability to replicate essential situational factors that influence performance in the applied clinical environment. Simulation-based training may resolve some of these challenges, however it is imperative that evidence be captured to document the achievement of performance competencies in the training environment that transfer to applied clinical care. The purpose of this study was to establish psychometric characteristics for competency assessment instruments associated with two such critical competencies: management of cholinergic crisis and pediatric-neonatal intubation. Methods: To inform the development of assessment instruments, we conducted comprehensive task analyses across each performance domain (knowledge, performance). Expert review confirmed content validity. Construct validity was established using the instruments to differentiate between the performance abilities of practitioners with variable experience (novice through expert). Purposively selected first-responder subjects for pediatric-neonatal intubation (N=214) and cholinergic crisis management (N=123) were stratified by level of experience performing the requisite clinical competencies. All subjects completed knowledge and performance assessments. Reliability was established using test-retest (Pearson correlation) and internal consistency (Cronbach’s alpha) for knowledge and performance assessments. 
Results: Significantly higher scores for subjects with greater levels of experience, compared to those with less experience established construct validity for each assessment instrument (p {\textless} .01). Significant correlations between test-retest outcomes indicated measurement reliability p {\textless} .01. Cronbach’s alpha for knowledge and performance scores demonstrated excellent internal consistency. Conclusions: Psychometric evidence establishes the value of assessment for identifying and remedying critical competency performance gaps.},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Klotz, Jessica; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas
Training Effects for First-responder Competency in Cholinergic Crisis Management Proceedings Article
In: Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014, Orlando, FL, 2014.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@inproceedings{klotz_training_2014,
title = {Training Effects for First-responder Competency in Cholinergic Crisis Management},
author = {Jessica Klotz and James M. Madsen and Charles G. Hurst and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Training%20Effects%20for%20First-responder%20Competency%20in%20Cholinergic%20Crisis%20Management.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014},
address = {Orlando, FL},
abstract = {Military and civilian first-responders must be able to recognize and effectively manage mass disaster casualties. Clinical management of injuries resulting from nerve agents provides different challenges for first responders than those of conventional weapons. We evaluated the impact of a mixed-methods training program on competency acquisition in cholinergic crisis clinical management. Methods: We developed a multimedia and simulation-based training program based on the more comprehensive USAMRICD courses. The training program was designed to provide first-responders with the necessary abilities to recognize and manage a mass casualty cholinergic crisis event. Training included a learner controlled multimedia iPad app and hands-on instruction using SimMan3G™ mannequin simulators. We evaluated the impact of the training through a purposively selected sample of 204 civilian and military first responders who had not previously completed either of the referenced USAMRICD courses. We assessed knowledge, performance, affect, and self-efficacy measures pre- and post-training using previously validated assessment instruments. We calculated results using analysis of variance with repeated measures, and with statistical significance set at p {\textless} .05. Results: Analyses demonstrated a significant improvement (p = .000) across all domains (knowledge, performance, self-efficacy, and affect). Knowledge scores increased from 60% to 81% correct. Performance scores increased from 16% to 68% correct. Self-efficacy scores increased from 51% to 87% confidence in ability to effectively manage a cholinergic crisis event. Affect scores increased from 75% to 81% personal comfort during procedures. Conclusions: These findings could aid in the selection of instructional methodologies available to a broad community of first-responder personnel in military and civilian service. 
Although less comprehensive than the USAMRICD courses, training outcomes associated with this easily distributed instruction set demonstrated its value in increasing the competency of first responders in recognizing and managing a mass casualty cholinergic event. Retention outcomes are in process.},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Reger, Greg M.; Rizzo, Albert A.; Gahm, Gregory A.
Initial Development and Dissemination of Virtual Reality Exposure Therapy for Combat-Related PTSD Book Section
In: Safir, Marilyn P.; Wallach, Helene S.; Rizzo, Albert "Skip" (Ed.): Future Directions in Post-Traumatic Stress Disorder, pp. 289–302, Springer US, Boston, MA, 2014, ISBN: 978-1-4899-7521-8 978-1-4899-7522-5.
Abstract | Links | BibTeX | Tags: DoD, MedVR
@incollection{reger_initial_2014,
title = {Initial Development and Dissemination of Virtual Reality Exposure Therapy for Combat-Related {PTSD}},
author = {Greg M. Reger and Albert A. Rizzo and Gregory A. Gahm},
editor = {Marilyn P. Safir and Helene S. Wallach and Albert ``Skip'' Rizzo},
url = {http://link.springer.com/10.1007/978-1-4899-7522-5_15},
isbn = {978-1-4899-7521-8, 978-1-4899-7522-5},
year = {2014},
date = {2014-11-01},
booktitle = {Future Directions in Post-Traumatic Stress Disorder},
pages = {289--302},
publisher = {Springer US},
address = {Boston, MA},
abstract = {Military personnel are at risk for the development of posttraumatic stress disorder. Although effective treatments are available, the need for improved treatment efficacy and less stigmatizing approaches to treatment have resulted in the evolution of virtual reality exposure therapy. This chapter reviews the development and dissemination efforts of a virtual reality system supporting exposure therapy for deployment-related posttraumatic stress disorder. Specifically, the chapter will review the work done to incorporate the feedback of military personnel into the early development of a Virtual Iraq/Afghanistan system and also reviews efforts to disseminate this treatment to military and Veteran behavioral health researchers and providers.},
keywords = {DoD, MedVR},
pubstate = {published},
tppubtype = {incollection}
}
Dehghani, M.; Khooshabeh, P.; Nazarian, A.; Gratch, J.
The Subtlety of Sound: Accent as a Marker for Culture Journal Article
In: Journal of Language and Social Psychology, 2014, ISSN: 0261-927X, 1552-6526.
Abstract | Links | BibTeX | Tags: ARL, DoD, Virtual Humans
@article{dehghani_subtlety_2014,
title = {The Subtlety of Sound: Accent as a Marker for Culture},
author = {Morteza Dehghani and Peter Khooshabeh and Angela Nazarian and Jonathan Gratch},
url = {http://jls.sagepub.com/cgi/doi/10.1177/0261927X14551095},
doi = {10.1177/0261927X14551095},
issn = {0261-927X, 1552-6526},
year = {2014},
date = {2014-09-01},
journal = {Journal of Language and Social Psychology},
abstract = {Aspects of language, such as accent, play a crucial role in the formation and categorization of one’s cultural identity. Recent work on accent emphasizes the role of accent in person perception and social categorization, demonstrating that accent also serves as a meaningful indicator of an ethnic category. In this article, we investigate whether the accent of an interaction partner, as a marker for culture, can induce cultural frame-shifts in biculturals. We report the results of three experiments, performed among bicultural and monocultural individuals, in which we test the above hypothesis. Our results demonstrate that accent alone can affect people’s cognition.},
keywords = {ARL, DoD, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2013
Khooshabeh, Peter; Dehghani, Morteza; Nazarian, Angela; Gratch, Jonathan
The Cultural Influence Model: When Accented Natural Language Spoken by Virtual Characters Matters Journal Article
In: Journal of Artificial Intelligence and Society, vol. 29, 2013.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{khooshabeh_cultural_2013,
  author    = {Peter Khooshabeh and Morteza Dehghani and Angela Nazarian and Jonathan Gratch},
  title     = {The Cultural Influence Model: When Accented Natural Language Spoken by Virtual Characters Matters},
  journal   = {Journal of Artificial Intelligence and Society},
  volume    = {29},
  year      = {2013},
  date      = {2013-09-01},
  url       = {http://ict.usc.edu/pubs/The%20Cultural%20Influence%20Model-%20When%20Accented%20Natural%20Language%20Spoken%20by%20Virtual%20Characters%20Matters.pdf},
  abstract  = {Advances in Artificial Intelligence (AI) and computer graphics digital technologies have contributed to a relative increase of realism in virtual characters. Preserving virtual characters’ communicative realism, in particular, joined the ranks of the improvements in natural language technology and animation algorithms. This paper focuses on culturally relevant paralinguistic cues in nonverbal communication. We model the effects of an English speaking digital character with different accents on human interactants (i.e., users). Our cultural influence model proposes that paralinguistic realism, in the form of accented speech, is effective in promoting culturally congruent cognition only when it is self-relevant to users. For example, a Chinese or Middle Eastern English accent may be perceived as foreign to individuals who do not share the same ethnic cultural background with members of those cultures. However, for individuals who are familiar and affiliate with those cultures (i.e., in-group members who are bicultural), accent not only serves as a motif of shared social identity, it also primes them to adopt culturally appropriate interpretive frames that influence their decision making.},
  keywords  = {ARL, DoD, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Hart, John; Gratch, Jonathan; Marsella, Stacy C.
How virtual reality training can win friends and influence people Book Section
In: Best, Christopher; Galanis, George; Kerry, James; Sottilare, Robert (Ed.): Fundamental Issues in Defense Training and Simulation, Ashgate, 2013, ISBN: 978-1-4094-4721-4.
Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@incollection{hart_how_2013,
  author    = {John Hart and Jonathan Gratch and Stacy C. Marsella},
  editor    = {Christopher Best and George Galanis and James Kerry and Robert Sottilare},
  title     = {How virtual reality training can win friends and influence people},
  booktitle = {Fundamental Issues in Defense Training and Simulation},
  series    = {Human Factors in Defense},
  publisher = {Ashgate},
  isbn      = {978-1-4094-4721-4},
  year      = {2013},
  date      = {2013-08-01},
  url       = {http://www.amazon.com/Fundamental-Defense-Training-Simulation-Factors-ebook/dp/B00EUE2F2I},
  keywords  = {ARL, DoD, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Khooshabeh, Peter; Melo, Celso M.; Volkman, Brooks; Gratch, Jonathan; Blascovich, Jim; Carnevale, Peter
Negotiation Strategies with Incongruent Facial Expressions of Emotion Cause Cardiovascular Threat Proceedings Article
In: Cognitive Science, Berlin, Germany, 2013.
Abstract | Links | BibTeX | Tags: ARL, DoD, ICB, UARC, Virtual Humans
@inproceedings{khooshabeh_negotiation_2013,
  author    = {Peter Khooshabeh and Celso M. Melo and Brooks Volkman and Jonathan Gratch and Jim Blascovich and Peter Carnevale},
  title     = {Negotiation Strategies with Incongruent Facial Expressions of Emotion Cause Cardiovascular Threat},
  booktitle = {Cognitive Science},
  address   = {Berlin, Germany},
  year      = {2013},
  date      = {2013-08-01},
  url       = {http://ict.usc.edu/pubs/Negotiation%20Strategies%20with%20Incongruent%20Facial%20Expressions%20of%20Emotion%20Cause%20Cardiovascular%20Threat.pdf},
  abstract  = {Affect is important in motivated performance situations such as negotiation. Longstanding theories of emotion suggest that facial expressions provide enough information to perceive another person’s internal affective state. Alternatively, the contextual emotion hypothesis posits that situational factors bias the perception of emotion in others’ facial displays. This hypothesis predicts that individuals will have different perceptions of the same facial expression depending upon the context in which the expression is displayed. In this study, cardiovascular indexes of motivational states (i.e., challenge vs. threat) were recorded while players engaged in a multi-issue negotiation where the opposing negotiator (confederate) displayed emotional facial expressions (angry vs. happy); the confederate’s negotiation strategy (cooperative vs. competitive) was factorially crossed with his facial expression. During the game, participants’ eye fixations and cardiovascular responses, indexing task engagement and challenge/threat motivation, were recorded. Results indicated that participants playing confederates with incongruent facial expressions (e.g., cooperative strategy, angry face) exhibited a greater threat response, which arises due to increased uncertainty. Eye fixations also suggest that participants look at the face more in order to acquire information to reconcile their uncertainty in the incongruent condition. Taken together, these results suggest that context matters in the perception of emotion.},
  keywords  = {ARL, DoD, ICB, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Yuqiong; Khooshabeh, Peter; Gratch, Jonathan
Looking Real and Making Mistakes Proceedings Article
In: 13th International Conference on Intelligent Virtual Humans, Edinburgh, Scotland, 2013.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{wang_looking_2013,
title = {Looking Real and Making Mistakes},
author = {Yuqiong Wang and Peter Khooshabeh and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Looking%20Real%20and%20Making%20Mistakes.pdf},
year = {2013},
date = {2013-08-01},
booktitle = {13th International Conference on Intelligent Virtual Humans},
address = {Edinburgh, Scotland},
abstract = {What happens when a Virtual Human makes mistakes? In this study we investigate the impact of VHs' conversational mistakes in the context of persuasion. The experiment also manipulated the level of photorealism of the VH. Users interacted with a VH that told persuasive information, and they were given the option to use the information to complete a problem-solving task. The VH occasionally made mistakes such as not responding, repeating the same answer, or giving irrelevant feedback. Results indicated that a VH is less persuasive when he or she makes conversational mistakes. Individual differences also shed light on the cognitive processes of users who interacted with VH who made conversational errors. Participants with a low Need For Cognition are more affected by the conversational errors. VH photorealism or gender did not have significant effects on the persuasion measure. We discuss the implications of these results with regard to Human-Virtual Human interaction.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Courtney, Chris; Dawson, Michael E.; Rizzo, Albert; Arizmendi, Brian
Visuospatial Processing and Learning Effects in Virtual Reality Based Mental Rotation and Navigational Tasks Proceedings Article
In: International Conference on Human-Computer Interaction, Las Vegas, NV, 2013.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@inproceedings{parsons_visuospatial_2013,
  author    = {Thomas D. Parsons and Chris Courtney and Michael E. Dawson and Albert Rizzo and Brian Arizmendi},
  title     = {Visuospatial Processing and Learning Effects in Virtual Reality Based Mental Rotation and Navigational Tasks},
  booktitle = {International Conference on Human-Computer Interaction},
  address   = {Las Vegas, NV},
  year      = {2013},
  date      = {2013-07-01},
  url       = {http://ict.usc.edu/pubs/Visuospatial%20Processing%20and%20Learning%20Effects%20in%20Virtual%20Reality%20Based%20Mental%20Rotation%20and%20Navigational%20Tasks.pdf},
  abstract  = {Visuospatial function and performance in interactions between humans and computers involve the human identification and manipulation of computer generated stimuli and their location. The impact of learning on mental rotation has been demonstrated in studies relating everyday spatial activities and spatial abilities. An aspect of visuospatial learning in virtual environments that has not been widely studied is the impact of threat on learning in a navigational task. In fact, to our knowledge, the combined assessment of learning during mental rotation trials and learning in an ecologically valid virtual reality-based navigational environment (that has both high and low threat zones) has not been adequately studied. Results followed expectation: 1) learning occurred in the virtual reality based mental rotation test. Although there was a relation between route learning and practice, a primacy effect was observed as participants performed more poorly when going from the first zone to the last.},
  keywords  = {DoD, MedVR, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hatch, Henry J.; Cherry, W. Peter; Glimcher, Paul W.; Hill, Randall W.; Keesee, Robin L.; Kieff, Elliot D.; Macmillan, Jean; Melvin, William L.; Paul, Richard R.; Pew, Richard; Rose, M. Frank; Sciarretta, Albert A.; Speed, Ann; Yakovac, Joseph
Making the Soldier Decisive on Future Battlefields Book
National Academies Press, Washington D.C., 2013.
Links | BibTeX | Tags: DoD, UARC
@book{hatch_making_2013,
  author    = {Henry J. Hatch and W. Peter Cherry and Paul W. Glimcher and Randall W. Hill and Robin L. Keesee and Elliot D. Kieff and Jean Macmillan and William L. Melvin and Richard R. Paul and Richard Pew and M. Frank Rose and Albert A. Sciarretta and Ann Speed and Joseph Yakovac},
  title     = {Making the Soldier Decisive on Future Battlefields},
  publisher = {National Academies Press},
  address   = {Washington D.C.},
  year      = {2013},
  date      = {2013-05-01},
  url       = {http://www.nap.edu/catalog.php?record_id=18321},
  keywords  = {DoD, UARC},
  pubstate  = {published},
  tppubtype = {book}
}
Rizzo, Albert; Buckwalter, John Galen; Forbell, Eric; Reist, Chris; Difede, JoAnn; Rothbaum, Barbara O.; Lange, Belinda; Koenig, Sebastian; Talbot, Thomas
Virtual Reality Applications to Address the Wounds of War Journal Article
In: Psychiatric Annals, vol. 43, no. 3, pp. 123–138, 2013.
Links | BibTeX | Tags: DoD, MedVR, UARC
@article{rizzo_virtual_2013-2,
title = {Virtual Reality Applications to Address the Wounds of War},
author = {Albert Rizzo and John Galen Buckwalter and Eric Forbell and Chris Reist and JoAnn Difede and Barbara O. Rothbaum and Belinda Lange and Sebastian Koenig and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Applications%20to%20Address%20the%20Wounds%20of%20War.pdf},
year = {2013},
date = {2013-03-01},
journal = {Psychiatric Annals},
volume = {43},
number = {3},
pages = {123--138},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Difede, JoAnn; Rothbaum, Barbara O.; Daughtry, J. Martin; Reger, Greg
Virtual Reality as a Tool for Delivering PTSD Exposure Therapy Book Section
In: Post-Traumatic Stress Disorder: Future Directions in Prevention, Diagnosis, and Treatment, Springer, 2013.
Abstract | Links | BibTeX | Tags: DoD, MedVR
@incollection{rizzo_virtual_2013,
title = {Virtual Reality as a Tool for Delivering {PTSD} Exposure Therapy},
author = {Albert Rizzo and JoAnn Difede and Barbara O. Rothbaum and J. Martin Daughtry and Greg Reger},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20as%20a%20Tool%20for%20Delivering%20PTSD%20Exposure%20Therapy.pdf},
year = {2013},
date = {2013-01-01},
booktitle = {Post-Traumatic Stress Disorder: Future Directions in Prevention, Diagnosis, and Treatment},
publisher = {Springer},
abstract = {Virtual Reality (VR) technology offers new opportunities for the development of innovative assessment and intervention tools. VR-based testing, training, and treatment approaches that would be difficult, if not impossible, to deliver using traditional methods are now being developed that take advantage of the assets available with VR technology. If empirical studies continue to demonstrate effectiveness, VR applications could provide new options for targeting the cognitive, psychological, motor and functional impairments that result from various psychological and physical disorders and conditions. VR allows for the precise presentation and control of stimuli within dynamic multi-sensory 3D computer generated environments, as well as providing advanced methods for capturing and quantifying behavioral responses. These characteristics serve as the basis for the rationale for VR applications in the clinical assessment, intervention and training domains. This chapter will begin with a brief review of the history and rationale for the use of VR with clinical populations followed by a description of the technology for creating and using VR clinically. The chapter will then focus on reviewing the rationale for VR Exposure Therapy (VRET) applied to Anxiety Disorders. The use of VRET for the treatment of PTSD will then be detailed followed by a description of the Virtual Iraq/Afghanistan VRET system and the results from its use with OEF/OIF Service Members and Veterans.},
keywords = {DoD, MedVR},
pubstate = {published},
tppubtype = {incollection}
}
2012
Gahm, Gregory; Reger, Greg; Ingram, Mary V.; Reger, Mark; Rizzo, Albert
A Multisite, Randomized Clinical Trial of Virtual Reality and Prolonged Exposure Therapy for Active Duty Soldiers with PTSD Technical Report
no. A611975, 2012.
Abstract | Links | BibTeX | Tags: DoD, MedVR
@techreport{gahm_multisite_2012,
title = {A Multisite, Randomized Clinical Trial of Virtual Reality and Prolonged Exposure Therapy for Active Duty Soldiers with {PTSD}},
author = {Gregory Gahm and Greg Reger and Mary V. Ingram and Mark Reger and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Multisite,%20Randomized%20Clinical%20Trial%20of%20Virtual%20Reality%20and%20Prolonged%20Exposure%20Therapy%20for%20Active%20Duty%20Soldiers%20with%20PTSD.pdf},
year = {2012},
date = {2012-12-01},
number = {A611975},
internal-note = {NOTE(review): @techreport requires an institution field; issuing body not identifiable from this record -- TODO confirm and add},
abstract = {This randomized, single blind study extends recruitment to an additional active duty site (Womack Army Medical Center at Ft Bragg) in support of a previously funded clinical trial to evaluate the efficacy of virtual reality exposure therapy (VRET) and prolonged exposure therapy (PE) with a waitlist (WL) group in the treatment of posttraumatic stress disorder (PTSD) in active duty (AD) Soldiers with combat-related trauma. During the first year, the study team developed the infrastructure to implement the trial including personnel recruitment, hiring, and initial training, process development to identify, screen, and enroll participants, and research protocol development and approval by IRB s. During the second year hiring of clinical staff and training of the study team was completed. Recruitment and enrollment commenced.},
keywords = {DoD, MedVR},
pubstate = {published},
tppubtype = {techreport}
}
Leaman, Suzanne; Rothbaum, Barbara O.; Difede, JoAnn; Cukor, Judith; Gerardi, Maryrose; Rizzo, Albert
Virtual Reality Exposure Therapy: A Treatment Manual for Combat Related PTSD Book Section
In: Handbook of Military Social Work, John Wiley & Sons, Inc., Hoboken, NJ, 2012.
Abstract | Links | BibTeX | Tags: DoD, MedVR
@incollection{leaman_virtual_2012,
title = {Virtual Reality Exposure Therapy: A Treatment Manual for Combat Related {PTSD}},
author = {Suzanne Leaman and Barbara O. Rothbaum and JoAnn Difede and Judith Cukor and Maryrose Gerardi and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Exposure%20Therapy-%20A%20Treatment%20Manual%20for%20Combat%20Related%20PTSD.pdf},
year = {2012},
date = {2012-12-01},
booktitle = {Handbook of Military Social Work},
publisher = {John Wiley \& Sons, Inc.},
address = {Hoboken, NJ},
abstract = {Posttraumatic stress disorder (PTSD) is a chronic condition that occurs in a significant minority of persons who experience life-threatening traumatic events. It is characterized by reexperiencing, avoidance, and hyperarousal symptoms (American Psychiatric Association, 1994). PTSD has been estimated to affect up to 18\% of returning Operation Iraqi Freedom (OIF) Veterans (Hoge et al., 2004). In addition to the specific conditions in Iraq and Afghanistan, an unprecedented number are now surviving serious wounds (Blimes, 2007). The stigma of treatment often prevents service members (SMs) and veterans from seeking help (Hoge et al., 2004), so finding an acceptable form of treatment for military personnel is a priority. The current generation of military personnel may be more comfortable participating in a virtual reality treatment approach than in traditional talk therapy, as they are likely familiar with gaming and training simulation technology. This chapter provides information on the development of and research on virtual reality (VR) as well as the application of VR to mental health treatments, including a protocol of virtual reality exposure (VRE) utilizing a virtual Iraq/Afghanistan system for combat-related PTSD.},
keywords = {DoD, MedVR},
pubstate = {published},
tppubtype = {incollection}
}
Khooshabeh, Peter; Hegarty, Mary; Shipley, Thomas F.
Individual Differences in Mental Rotation Journal Article
In: Experimental Psychology, vol. 59, 2012.
Abstract | Links | BibTeX | Tags: ARL, DoD, ICB, Virtual Humans
@article{khooshabeh_individual_2012,
title = {Individual Differences in Mental Rotation},
author = {Peter Khooshabeh and Mary Hegarty and Thomas F. Shipley},
url = {http://ict.usc.edu/pubs/Individual%20Differences%20in%20Mental%20Rotation.pdf},
year = {2012},
date = {2012-11-01},
journal = {Experimental Psychology},
volume = {59},
abstract = {Two experiments tested the hypothesis that imagery ability and figural complexity interact to affect the choice of mental rotation strategies. Participants performed the Shepard and Metzler (1971) mental rotation task. On half of the trials, the 3-D figures were manipulated to create ``fragmented'' figures, with some cubes missing. Good imagers were less accurate and had longer response times on fragmented figures than on complete figures. Poor imagers performed similarly on fragmented and complete figures. These results suggest that good imagers use holistic mental rotation strategies by default, but switch to alternative strategies depending on task demands, whereas poor imagers are less flexible and use piecemeal strategies regardless of the task demands.},
keywords = {ARL, DoD, ICB, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}