Publications
Mozgai, Sharon; Hartholt, Arno; Akinyemi, Dayo; Kubicek, Katarina; Rizzo, Albert (Skip); Kipke, Michele
Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation Book Section
In: HCI International 2020 - Posters, vol. 1225, pp. 304–307, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-50728-2 978-3-030-50729-9.
@incollection{mozgai_development_2020,
title = {Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation},
author = {Sharon Mozgai and Arno Hartholt and Dayo Akinyemi and Katarina Kubicek and Albert (Skip) Rizzo and Michele Kipke},
url = {http://link.springer.com/10.1007/978-3-030-50729-9_43},
doi = {10.1007/978-3-030-50729-9_43},
isbn = {978-3-030-50728-2 978-3-030-50729-9},
year = {2020},
date = {2020-07-01},
booktitle = {HCI International 2020 - Posters},
volume = {1225},
pages = {304–307},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {The overall goal of VRN is to develop a novel technology solution at Children’s Hospital Los Angeles (CHLA) to overcome barriers that prevent the recruitment of diverse patient populations to clinical trials by providing both caregivers and children with an interactive educational experience. This system consists of 1) an intelligent agent called Zippy that users interact with by keyboard or voice input, 2) a series of videos covering topics including Privacy, Consent and Benefits, and 3) a UI that guides users through all available content. Pre- and post-questionnaires assessed willingness to participate in clinical research and found participants either increased or maintained their level of willingness to participate in research studies. Additionally, qualitative analysis of interview data revealed participants rated the overall interaction favorably and believed Zippy to be more fun, less judgmental and less threatening than interacting with a human. Future iterations are in progress based on the user feedback.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Proceedings Article
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma},
author = {Sharon Mozgai and Arno Hartholt and Andrew Leeds and Albert ‘Skip’ Rizzo},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first-step in defining a VRET domain transfer methodology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Proceedings Article
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1–3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multi-media content via a smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in development.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari, Italy, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118–119},
publisher = {ACM},
address = {Cagliari, Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via a smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Lerner, Itamar; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans Journal Article
In: Frontiers in Neuroscience, vol. 13, pp. 1416, 2020, ISSN: 1662-453X.
@article{pilly_one-shot_2020,
title = {One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Itamar Lerner and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.frontiersin.org/article/10.3389/fnins.2019.01416/full},
doi = {10.3389/fnins.2019.01416},
issn = {1662-453X},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Neuroscience},
volume = {13},
pages = {1416},
abstract = {Targeted memory reactivation (TMR) during slow-wave oscillations (SWOs) in sleep has been demonstrated with sensory cues to achieve about 5–12% improvement in post-nap memory performance on simple laboratory tasks. But prior work has not yet addressed the one-shot aspect of episodic memory acquisition, or dealt with the presence of interference from ambient environmental cues in real-world settings. Further, TMR with sensory cues may not be scalable to the multitude of experiences over one’s lifetime. We designed a novel non-invasive non-sensory paradigm that tags one-shot experiences of minute-long naturalistic episodes in immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). In particular, we demonstrated that these STAMPs can be reapplied as brief pulses during SWOs in sleep to achieve about 10–20% improvement in the metamemory of targeted episodes compared to the control episodes at 48 hours after initial viewing. We found that STAMPs can not only facilitate but also impair metamemory for the targeted episodes based on an interaction between presleep metamemory and the number of STAMP applications during sleep. Overnight metamemory improvements were mediated by spectral power increases following the offset of STAMPs in the slow-spindle band (8–12 Hz) for left temporal areas in the scalp electroencephalography (EEG) during sleep. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308–3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231–245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of thousands of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects’ research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
@inproceedings{hartholt_virtual_2019,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205–207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses the Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate and head orientation. The system provides two characters, with three conversational modes each. Ported from an existing desktop application, the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238–240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ASD), veterans transitioning to civilian life, and former convicts integrating back into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different type of vocational field (e.g., service industry, retail, office, etc.). Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying difficulties, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g., conference room, restaurant, executive office, etc.), allowing for many different combinations in which to practice.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Lerner, Itamar; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans Journal Article
In: bioRxiv, pp. 110, 2019.
@article{pilly_spatiotemporal_2019,
title = {Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Itamar Lerner and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.biorxiv.org/content/10.1101/672378v1.abstract},
doi = {10.1101/672378},
year = {2019},
date = {2019-06-01},
journal = {bioRxiv},
pages = {110},
abstract = {Long-term retention of memories critically depends on consolidation processes, which occur during slow-wave oscillations (SWOs) in non-rapid eye movement (NREM) sleep. We designed a non-invasive system that can tag one-shot experiences of naturalistic episodes within immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). We demonstrate that these STAMPs can be re-applied during UP states of SWOs on two consecutive nights to achieve a 19.43% improvement in the metamemory of targeted episodes at 48 hours after the one-shot viewing, compared to the control episodes. Further, we found an interaction between pre-sleep metamemory of targeted episodes and the number of STAMP applications for those episodes during sleep, and that STAMPs elicit increases in left temporal slow-spindle (9-12 Hz) power that are predictive of overnight metamemory improvements. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory based on pre-sleep performance and tracking the STAMP-induced biomarker during sleep, and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Leuski, Anton; Benn, Grace; Klassen, Eric; Fast, Edward; Liewer, Matt; Hartholt, Arno; Traum, David
PRIMER: An Emotionally Aware Virtual Agent Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 10, ACM, Los Angeles, CA, 2019.
@inproceedings{gordon_primer_2019,
title = {PRIMER: An Emotionally Aware Virtual Agent},
author = {Carla Gordon and Anton Leuski and Grace Benn and Eric Klassen and Edward Fast and Matt Liewer and Arno Hartholt and David Traum},
url = {https://www.research.ibm.com/haifa/Workshops/user2agent2019/},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {10},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {PRIMER is a proof-of-concept system designed to show the potential of immersive dialogue agents and virtual environments that adapt and respond to both direct verbal input and indirect emotional input. The system has two novel interfaces: (1) for the user, an immersive VR environment and an animated virtual agent both of which adapt and react to the user’s direct input as well as the user’s perceived emotional state, and (2) for an observer, an interface that helps track the perceived emotional state of the user, with visualizations to provide insight into the system’s decision making process. While the basic system architecture can be adapted for many potential real world applications, the initial version of this system was designed to assist clinical social workers in helping children cope with bullying. The virtual agent produces verbal and non-verbal behaviors guided by a plan for the counseling session, based on in-depth discussions with experienced counselors, but is also reactive to both initiatives that the user takes, e.g. asking their own questions, and the user’s perceived emotional state.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lerner, Itamar; Ketz, Nicholas A.; Jones, Aaron P.; Bryant, Natalie B.; Robert, Bradley; Skorheim, Steven W.; Hartholt, Arno; Rizzo, Albert S.; Gluck, Mark A.; Clark, Vincent P.; Pilly, Praveen K.
Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences Journal Article
In: Scientific Reports, vol. 9, no. 1, 2019, ISSN: 2045-2322.
@article{lerner_transcranial_2019,
title = {Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences},
author = {Itamar Lerner and Nicholas A. Ketz and Aaron P. Jones and Natalie B. Bryant and Bradley Robert and Steven W. Skorheim and Arno Hartholt and Albert S. Rizzo and Mark A. Gluck and Vincent P. Clark and Praveen K. Pilly},
url = {http://www.nature.com/articles/s41598-018-36107-7},
doi = {10.1038/s41598-018-36107-7},
issn = {2045-2322},
year = {2019},
date = {2019-02-01},
journal = {Scientific Reports},
volume = {9},
number = {1},
abstract = {Slow-wave sleep (SWS) is known to contribute to memory consolidation, likely through the reactivation of previously encoded waking experiences. Contemporary studies demonstrate that when auditory or olfactory stimulation is administered during memory encoding and then reapplied during SWS, memory consolidation can be enhanced, an effect that is believed to rely on targeted memory reactivation (TMR) induced by the sensory stimulation. Here, we show that transcranial current stimulations (tCS) during sleep can also be used to induce TMR, resulting in the facilitation of high-level cognitive processes. Participants were exposed to repeating sequences in a realistic 3D immersive environment while being stimulated with particular tCS patterns. A subset of these tCS patterns was then reapplied during sleep stages N2 and SWS coupled to slow oscillations in a closed-loop manner. We found that in contrast to our initial hypothesis, performance for the sequences corresponding to the reapplied tCS patterns was no better than for other sequences that received stimulations only during wake or not at all. In contrast, we found that the more stimulations participants received overnight, the more likely they were to detect temporal regularities governing the learned sequences the following morning, with tCS-induced beta power modulations during sleep mediating this effect.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Roy, Michael J.; Hartholt, Arno; Costanzo, Michelle; Highland, Krista Beth; Jovanovic, Tanja; Norrholm, Seth D.; Reist, Chris; Rothbaum, Barbara; Difede, JoAnn
Virtual Reality Applications for the Assessment and Treatment of PTSD Book Section
In: Handbook of Military Psychology, pp. 453–471, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-66190-2 978-3-319-66192-6.
@incollection{rizzo_virtual_2017,
title = {Virtual Reality Applications for the Assessment and Treatment of PTSD},
author = {Albert Rizzo and Michael J. Roy and Arno Hartholt and Michelle Costanzo and Krista Beth Highland and Tanja Jovanovic and Seth D. Norrholm and Chris Reist and Barbara Rothbaum and JoAnn Difede},
url = {http://link.springer.com/10.1007/978-3-319-66192-6_27},
doi = {10.1007/978-3-319-66192-6_27},
isbn = {978-3-319-66190-2 978-3-319-66192-6},
year = {2017},
date = {2017-12-01},
booktitle = {Handbook of Military Psychology},
pages = {453–471},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {War is one of the most challenging situations that a human being can encounter. The physical, emotional, cognitive, and psychological demands of a combat environment place tremendous stress on even the most well-prepared military people. It is no surprise that the stressful experiences characteristic of operations in Iraq and Afghanistan have produced significant numbers of service members (SMs) and veterans at risk for posttraumatic stress disorder (PTSD), as well as other psychosocial/behavioral health conditions. For example, as of June 2015, the Defense Medical Surveillance System reported 138,197 active duty SMs had been diagnosed with PTSD (Fischer, 2015). In a meta-analysis of studies published since 2001, 13.2% of infantry service members met the criteria for PTSD, with incidence rising dramatically to 25–30% in units with high levels of direct combat exposure (Kok, Herrell, Thomas, & Hoge, 2012). Moreover, as of early 2013, the prevalence of PTSD among discharged veterans receiving treatment at Veterans Affairs (VA) clinics was reported to be 29% (Fischer, 2013). These findings make a compelling case for a continued focus on developing and enhancing the availability of diverse evidence-based treatment options to address this military behavioral healthcare challenge. One emerging area of research and clinical focus is the use of Virtual Reality (VR) simulation technology as a tool for delivering evidence-based approaches for the assessment and treatment of PTSD. Although in recent times, the popular media has lavishly reported on VR’s potential impact on all elements of our evolving digital culture, and has created the impression that VR is a novel technology, the reality is that VR is not a new concept, and many of its developmental roots are traceable to the 1980s and 1990s (Schnipper et al., 2015). Moreover, a large scientific literature has emerged over the last 20 years demonstrating the unique and added value that is accrued with the use of VR to address a wide range of clinical health conditions (Rizzo 1994; Rizzo et al., 1997; 2002; 2010; 2014; Rizzo, Cukor et al., 2015). Within that context, the present chapter will summarize the ways that researchers and clinicians have employed VR to create relevant simulations that can be applied to the assessment and treatment of PTSD.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Auerbach, Daniel; Mehta, Tirth R.; Hartholt, Arno
Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress) Proceedings Article
In: Proceedings of the GIFTSym5, pp. 23–35, ARL, Orlando, Florida, 2017.
@inproceedings{nye_building_2017,
title = {Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress)},
author = {Benjamin D. Nye and Daniel Auerbach and Tirth R. Mehta and Arno Hartholt},
url = {https://books.google.com/books?id=PwMtDwAAQBAJ&printsec=copyright&source=gbs_pub_info_r#v=onepage&q&f=false},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the GIFTSym5},
pages = {23–35},
publisher = {ARL},
address = {Orlando, Florida},
abstract = {As intelligent tutoring systems (ITS) increasingly need to interoperate and co-exist, emerging systems have transitioned toward service-oriented designs to enable modularity and composability of tutoring components made and/or maintained by different research and development groups. However, as a research community, we have still not reached a point where it is trivial for a new service to be added into a system like the Generalized Intelligent Framework for Tutoring (GIFT; Sottilare, Goldberg, Brawner, & Holden, 2012). In an early paper considering this issue with respect to the GIFT architecture (Nye & Morrison, 2013), we proposed addressing this issue by building toward a lightweight multi-agent architecture where certain services act as autonomous agents: “a system situated within and a part of an environment that senses that environment and acts on it, over time, in pursuit of its own agenda and so as to affect what it senses in the future” (Franklin & Graesser, 1997; p. 25). In our work in progress described here, we discuss how we are approaching the opportunity to build such capabilities into GIFT. The high level goals of our work are targeting two core goals for GIFT: A) to be a lightweight framework that will expand access to and use of ITS and B) to help GIFT to increase the intelligence and effectiveness of its services based on data over time. We are currently targeting the first goal, which will underpin the second goal. However, what does it mean to be a lightweight framework? In this context, a “lightweight framework” is framed as minimizing the following criteria: (1) hardware requirements, (2) software expertise to design services, (3) software expertise to use existing services, (4) software expertise to stand up the message-passing layer between agents, and (5) a minimal working message ontology (Nye & Morrison, 2013). Since our original paper four years ago, GIFT has made significant strides in reducing barriers related to hardware by building a cloud-based version and software expertise to use GIFT services through authoring tools. It has also developed a growing ontology of messages (e.g., https://gifttutoring.org/projects/gift/wiki/Interface_Control_Document_2016-1). With that said, despite now-extensive documentation, designing new services for GIFT is still not trivial and strong expertise is required to pass messages between GIFT modules and agents (either internal or external). To address these issues, the Building a Backbone project is working toward agent-oriented designs that build on GIFT's existing service-oriented framework. By moving from services toward agents, modules will be able to act more autonomously, enabling capabilities such as plug-and-play, hot-swapping, and selecting between multiple services providing the same capabilities. These new capabilities are intended to reduce barriers to building new GIFT-compatible services and also to integrating GIFT with other service-oriented ecosystems. The first steps toward these capabilities are an ontology mapping service and an initial integration that combines GIFT, the Virtual Human Toolkit core framework for agents, and the SuperGLU framework for adding agent-oriented capabilities for coordinating services. This paper reports on work to date, with an emphasis on target capabilities, design decisions, challenges, and open research questions for this work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ronald; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and computational analysis of psychological signals using a virtual human interviewing agent Journal Article
In: Journal of Pain Management, pp. 311–321, 2016, ISSN: 1939-5914.
@article{rizzo_detection_2016,
title = {Detection and computational analysis of psychological signals using a virtual human interviewing agent},
author = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ronald Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis-Philippe Morency},
url = {http://www.icdvrat.org/2014/papers/ICDVRAT2014_S03N3_Rizzo_etal.pdf},
issn = {1939-5914},
year = {2016},
date = {2016-11-01},
journal = {Journal of Pain Management},
pages = {311–321},
abstract = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded "Detection and Computational Analysis of Psychological Signals" project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals longitudinally that can be used to inform diagnostic assessment within a clinical context.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning (PAL3)},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of The Twenty-Ninth International Flairs Conference},
pages = {491–496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Morency, Louis-Philippe; DeVault, David; Hartholt, Arno; Fast, Edward; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert
A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews Proceedings Article
In: Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on, pp. 787–789, IEEE, Xi'an, China, 2015.
@inproceedings{stratou_demonstration_2015,
title = {A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews},
author = {Giota Stratou and Louis-Philippe Morency and David DeVault and Arno Hartholt and Edward Fast and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert Rizzo},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7344661},
doi = {10.1109/ACII.2015.7344661},
year = {2015},
date = {2015-09-01},
booktitle = {Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on},
pages = {787–789},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. With this demo, we focus our attention on the perception part of the system, a multimodal framework which captures and analyzes user state behavior for both behavioral understanding and interactional purposes. We will demonstrate real-time user state sensing as a part of the SimSensei architecture and discuss how this technology enabled automatic analysis of behaviors related to psychological distress.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bruijnes, Merijn; Akker, Rieks; Hartholt, Arno; Heylen, Dirk
Virtual Suspect William Proceedings Article
In: Intelligent Virtual Agents, pp. 67–76, Springer, 2015.
@inproceedings{bruijnes_virtual_2015,
title = {Virtual Suspect William},
author = {Merijn Bruijnes and Rieks Akker and Arno Hartholt and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Virtual%20Suspect%20William.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {67–76},
publisher = {Springer},
abstract = {We evaluate an algorithm which computes the responses of an agent that plays the role of a suspect in simulations of police interrogations. The algorithm is based on a cognitive model - the response model - that is centred around keeping track of interpersonal relations. The model is parametrized in such a way that different personalities of the virtual suspect can be defined. In the evaluation we defined three different personalities and had participants guess the personality based on the responses the model provided in an interaction with the participant. We investigate what factors contributed to the ability of a virtual agent to show behaviour that was recognized by participants as belonging to a persona.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Stratou, Giota; DeVault, David; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert "Skip"
SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications Proceedings Article
In: Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI), Austin, Texas, 2015.
@inproceedings{morency_simsensei_2015,
title = {SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications},
author = {Louis-Philippe Morency and Giota Stratou and David DeVault and Arno Hartholt and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert "Skip" Rizzo},
url = {http://ict.usc.edu/pubs/SimSensei%20Demonstration%20A%20Perceptive%20Virtual%20Human%20Interviewer%20for%20Healthcare%20Applications.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI)},
address = {Austin, Texas},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. We emphasize on the perception part of the system, a multimodal framework which captures and analyzes user state for both behavioral understanding and interactional purposes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ron; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent Proceedings Article
In: Proceedings of ICDVRAT 2014, International Journal of Disability and Human Development, Gothenburg, Sweden, 2014.
@inproceedings{rizzo_detection_2014,
title = {Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent},
author = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ron Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Detection%20and%20Computational%20Analysis%20of%20Psychological%20Signals%20Using%20a%20Virtual%20Human%20Interviewing%20Agent.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of ICDVRAT 2014},
publisher = {International Journal of Disability and Human Development},
address = {Gothenburg, Sweden},
abstract = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded “Detection and Computational Analysis of Psychological Signals” project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Hartholt, Arno; Russ, Thomas; Traum, David; Hovy, Eduard; Robinson, Susan
A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
@inproceedings{hartholt_common_2008,
title = {A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture},
author = {Arno Hartholt and Thomas Russ and David Traum and Eduard Hovy and Susan Robinson},
url = {http://ict.usc.edu/pubs/A%20Common%20Ground%20for%20Virtual%20Humans-%20Using%20an%20Ontology%20in%20a%20Natural%20Language%20Oriented%20Virtual%20Human%20Architecture.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Marrakech, Morocco},
abstract = {When dealing with large, distributed systems that use state-of-the-art components, individual components are usually developed in parallel. As development continues, the decoupling invariably leads to a mismatch between how these components internally represent concepts and how they communicate these representations to other components: representations can get out of synch, contain localized errors, or become manageable only by a small group of experts for each module. In this paper, we describe the use of an ontology as part of a complex distributed virtual human architecture in order to enable better communication between modules while improving the overall flexibility needed to change or extend the system. We focus on the natural language understanding capabilities of this architecture and the relationship between language and concepts within the entire system in general and the ontology in particular.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2007
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Swartout, William; Traum, David; Marsella, Stacy C.; Piepol, Diane
Building Interactive Virtual Humans for Training Environments Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2007.
@inproceedings{kenny_building_2007,
title = {Building Interactive Virtual Humans for Training Environments},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and William Swartout and David Traum and Stacy C. Marsella and Diane Piepol},
url = {http://ict.usc.edu/pubs/Building%20Interactive%20Virtual%20Humans%20for%20Training%20Environments.pdf},
year = {2007},
date = {2007-11-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {There is a great need in the Joint Forces to have human to human interpersonal training for skills such as negotiation, leadership, interviewing and cultural training. Virtual environments can be incredible training tools if used properly and used for the correct training application. Virtual environments have already been very successful in training Warfighters how to operate vehicles and weapons systems. At the Institute for Creative Technologies (ICT) we have been exploring a new question: can virtual environments be used to train Warfighters in interpersonal skills such as negotiation, tactical questioning and leadership that are so critical for success in the contemporary operating environment? Using embodied conversational agents to create this type of training system has been one of the goals of the Virtual Humans project at the institute. ICT has a great deal of experience building complex, integrated and immersive training systems that address the human factor needs for training experiences. This paper will address the research, technology and value of developing virtual humans for training environments. This research includes speech recognition, natural language understanding & generation, dialogue management, cognitive agents, emotion modeling, question response managers, speech generation and non-verbal behavior. Also addressed will be the diverse set of training environments we have developed for the system, from single computer laptops to multi-computer immersive displays to real and virtual integrated environments. This paper will also discuss the problems, issues and solutions we encountered while building these systems. The paper will recount subject testing we have performed in these environments and results we have obtained from users. Finally the future of this type of Virtual Humans technology and training applications will be discussed.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Traum, David; Marsella, Stacy C.; Swartout, William
The More the Merrier: Multi-Party Negotiation with Virtual Humans Proceedings Article
In: AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence, pp. 1970–1971, 2007.
@inproceedings{kenny_more_2007,
title = {The More the Merrier: Multi-Party Negotiation with Virtual Humans},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and David Traum and Stacy C. Marsella and William Swartout},
url = {http://ict.usc.edu/pubs/The%20More%20the%20Merrier-%20Multi-Party%20Negotiation%20with%20Virtual%20Humans.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence},
volume = {2},
pages = {1970–1971},
abstract = {The goal of the Virtual Humans Project at the University of Southern California’s Institute for Creative Technologies is to enrich virtual training environments with virtual humans – autonomous agents that support face-to-face interaction with trainees in a variety of roles – through bringing together many different areas of research including speech recognition, natural language understanding, dialogue management, cognitive modeling, emotion modeling, non-verbal behavior and speech and knowledge management. The demo at AAAI will focus on our work using virtual humans to train negotiation skills. Conference attendees will negotiate with a virtual human doctor and elder to try to move a clinic out of harm’s way in single and multi-party negotiation scenarios using the latest iteration of our Virtual Humans framework. The user will use natural speech to talk to the embodied agents, who will respond in accordance with their internal task model and state. The characters will carry out a multi-party dialogue with verbal and non-verbal behavior. A video of a single-party version of the scenario was shown at AAAI-06. This new interactive demo introduces several new features, including multi-party negotiation, dynamically generated non-verbal behavior and a central ontology.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2004
Muller, T. J.; Hartholt, Arno; Marsella, Stacy C.; Gratch, Jonathan; Traum, David
Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Kloster Irsee, Germany, 2004.
@inproceedings{muller_you_2004,
title = {Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue},
author = {T. J. Muller and Arno Hartholt and Stacy C. Marsella and Jonathan Gratch and David Traum},
url = {http://ict.usc.edu/pubs/Do%20you%20want%20to%20talk%20about%20it.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Kloster Irsee, Germany},
abstract = {In this paper, we describe an implemented system for emotion-referring dialogue. An agent can engage in emotion-referring dialogue if it first has a model of its own emotions, and secondly has a way of talking about them. We create this facility in the MRE Project's virtual humans, building upon the existing emotion and dialogue facilities of these agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Muller, T. J.
Interaction on Emotions Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 02.2004, 2004.
@techreport{hartholt_interaction_2004,
title = {Interaction on Emotions},
author = {Arno Hartholt and T. J. Muller},
url = {http://ict.usc.edu/pubs/Interaction%20on%20emotions.pdf},
year = {2004},
date = {2004-01-01},
number = {ICT TR 02.2004},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {This report describes the addition of an emotion dialogue to the Mission Rehearsal Exercise (MRE) system. The goal of the MRE system is to provide an immersive learning environment for army officer recruits. The user can engage in conversation with several intelligent agents in order to accomplish the goals within a certain scenario. Although these agents did already possess emotions, they were unable to express them verbally. A question-answer dialogue has been implemented for this purpose. The implementation makes use of proposition states for modelling knowledge, keyword scanning for natural language understanding and templates for natural language generation. The system is implemented using Soar and TCL. An agent can understand emotion-related questions in four different domains: type, intensity, state, and the combination of responsible-agent and blameworthiness. Some limitations arise due to the techniques used and to the relatively short time frame in which the assignment was to be executed. Main issues are that the existing natural language understanding and generation modules could not be fully used, that very little context about the conversation is available and that the emotion states simplify the emotional state of an agent. These limitations and other thoughts give rise to the following recommendations for further work: * Make full use of references. * Use coping strategies for generating agent's utterances. * Use focus mechanisms for generating agent's utterances. * Extend known utterances. * Use NLU and NLG module. * Use emotion dialogue and states to influence emotions. * Fix known bugs.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
0000
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
@article{hartholt_combat_nodate,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Arno Hartholt and Sharon Mozgai},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
keywords = {DTIC, MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 0000.
@article{hartholt_introducing_nodate,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
pages = {11},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Rizzo, Albert A; Hartholt, Arno
Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application Proceedings Article
In: 0000.
@inproceedings{mozgai_persuasive_nodate,
title = {Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application},
author = {Sharon Mozgai and Albert A Rizzo and Arno Hartholt},
abstract = {We are demoing Battle Buddy, an mHealth application designed to support access to physical and mental wellness content as well as safety planning for U.S. military veterans. This virtual human interface will collect multimodal data through passive sensors native to popular wearables (e.g., Apple Watch) and deliver adaptive multimedia content specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Battle Buddy can deliver health interventions matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). All interactions were specifically designed to engage and motivate by employing the persuasive strategies of (1) personalization, (2) self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise.},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}