Publications
Search
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied {AI} Agents in {XR}},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308--3084},
internal-note = {page range 308--3084 as listed on IEEE Xplore looks anomalous -- verify against the published proceedings},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231--245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of 1000’s of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects’ research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205--207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses the Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate and head orientation. The system provides two characters, with three conversational modes each. Ported from an existing desktop application, the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{hartholt_virtual_2019,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238--240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ADS), veterans transitioning to civilian life, and former convicts integrating back into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different type of vocational field, (e.g. service industry, retail, office, etc.) Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying difficulties, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g. conference room, restaurant, executive office, etc.), allowing for many different combinations in which to practice.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chu, Veronica C.; Lucas, Gale M.; Lei, Su; Mozgai, Sharon; Khooshabeh, Peter; Gratch, Jonathan
Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat Journal Article
In: Frontiers in Human Neuroscience, vol. 13, 2019, ISSN: 1662-5161.
@article{chu_emotion_2019,
title = {Emotion Regulation in the {Prisoner’s Dilemma}: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat},
author = {Veronica C. Chu and Gale M. Lucas and Su Lei and Sharon Mozgai and Peter Khooshabeh and Jonathan Gratch},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2019.00050/full},
doi = {10.3389/fnhum.2019.00050},
issn = {1662-5161},
year = {2019},
date = {2019-02-01},
journal = {Frontiers in Human Neuroscience},
volume = {13},
internal-note = {article number (eid) missing; DOI suffix suggests article 50 -- verify before adding},
abstract = {The current study examines cooperation and cardiovascular responses in individuals that were defected on by their opponent in the first round of an iterated Prisoner’s Dilemma. In this scenario, participants were either primed with the emotion regulation strategy of reappraisal or no emotion regulation strategy, and their opponent either expressed an amused smile or a polite smile after the results were presented. We found that cooperation behavior decreased in the no emotion regulation group when the opponent expressed an amused smile compared to a polite smile. In the cardiovascular measures, we found significant differences between the emotion regulation conditions using the biopsychosocial (BPS) model of challenge and threat. However, the cardiovascular measures of participants instructed with the reappraisal strategy were only weakly comparable with a threat state of the BPS model, which involves decreased blood flow and perception of greater task demands than resources to cope with those demands. Conversely, the cardiovascular measures of participants without an emotion regulation were only weakly comparable with a challenge state of the BPS model, which involves increased blood flow and perception of having enough or more resources to cope with task demands.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Boberg, Jill; Artstein, Ron; Gratch, Jonathan
Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 125–132, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
@inproceedings{mell_towards_2018,
title = {Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, \& Machiavellianism},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jill Boberg and Ron Artstein and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3267910},
doi = {10.1145/3267851.3267910},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {125--132},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {We present the results of a study in which humans negotiate with computerized agents employing varied tactics over a repeated number of economic ultimatum games. We report that certain agents are highly effective against particular classes of humans: several individual difference measures for the human participant are shown to be critical in determining which agents will be successful. Asking for favors works when playing with pro-social people but backfires with more selfish individuals. Further, making poor offers invites punishment from Machiavellian individuals. These factors may be learned once and applied over repeated negotiations, which means user modeling techniques that can detect these differences accurately will be more successful than those that don’t. Our work additionally shows that a significant benefit of cooperation is also present in repeated games—after sufficient interaction. These results have deep significance to agent designers who wish to design agents that are effective in negotiating with a broad swath of real human opponents. Furthermore, it demonstrates the effectiveness of techniques which can reason about negotiation over time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Neubauer, Catherine; Mozgai, Sharon; Scherer, Stefan; Woolley, Joshua; Chuang, Brandon
Manual and Automatic Measures Confirm-Intranasal Oxytocin Increases Facial Expressivity Journal Article
In: Affective Computing and Intelligent Interaction, 2017.
@article{neubauer_manual_2017,
title = {Manual and Automatic Measures Confirm-Intranasal Oxytocin Increases Facial Expressivity},
author = {Catherine Neubauer and Sharon Mozgai and Stefan Scherer and Joshua Woolley and Brandon Chuang},
url = {https://www.researchgate.net/publication/321644417_Manual_and_Automatic_Measures_Confirm-Intranasal_Oxytocin_Increases_Facial_Expressivity},
year = {2017},
date = {2017-12-01},
journal = {Affective Computing and Intelligent Interaction},
internal-note = {venue name matches the ACII conference series rather than a journal -- verify entry type; tracking query string removed from the original ResearchGate URL (contained raw percent signs)},
abstract = {The effects of oxytocin on facial emotional expressivity were investigated in individuals with schizophrenia and age-matched healthy controls during the completion of a Social Judgment Task (SJT) with a double-blind, placebo-controlled, cross-over design. Although pharmacological interventions exist to help alleviate some symptoms of schizophrenia, currently available agents are not effective at improving the severity of blunted facial affect. Participant facial expressivity was previously quantified from video recordings of the SJT using a wellvalidated manual approach (Facial Expression Coding System; FACES). We confirm these findings using an automated computer-based approach. Using both methods we found that the administration of oxytocin significantly increased total facial expressivity in individuals with schizophrenia and increased facial expressivity at trend level in healthy controls. Secondary analysis showed that oxytocin also significantly increased the frequency of negative valence facial expressions in individuals with schizophrenia but not in healthy controls and that oxytocin did not significantly increase positive valence facial expressions in either group. Both manual coding and automatic facial analysis revealed the same pattern of findings. Considering manual annotation can be expensive and timeconsuming, these results suggest that automatic facial analysis may be an efficient and cost-effective alternative to currently utilized manual approaches and may be ready for use in clinical settings.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Chollet, Mathieu; Mozgai, Sharon; Dennison, Mark; Khooshabeh, Peter; Scherer, Stefan
The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task Proceedings Article
In: Proceedings of the 19th ACM International Conference on Multimodal Interaction, pp. 426–432, ACM Press, Glasgow, UK, 2017, ISBN: 978-1-4503-5543-8.
@inproceedings{neubauer_relationship_2017,
title = {The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task},
author = {Catherine Neubauer and Mathieu Chollet and Sharon Mozgai and Mark Dennison and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=3136755.3136804},
doi = {10.1145/3136755.3136804},
isbn = {978-1-4503-5543-8},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the 19th ACM International Conference on Multimodal Interaction},
pages = {426--432},
publisher = {ACM Press},
address = {Glasgow, UK},
abstract = {It is commonly known that a relationship exists between the human voice and various emotional states. Past studies have demonstrated changes in a number of vocal features, such as fundamental frequency f0 and peakSlope, as a result of varying emotional state. These voice characteristics have been shown to relate to emotional load, vocal tension, and, in particular, stress. Although much research exists in the domain of voice analysis, few studies have assessed the relationship between stress and changes in the voice during a dyadic team interaction. The aim of the present study was to investigate the multimodal interplay between speech and physiology during a high-workload, high-stress team task. Specifically, we studied task-induced effects on participants' vocal signals, specifically, the f0 and peakSlope features, as well as participants' physiology, through cardiovascular measures. Further, we assessed the relationship between physiological states related to stress and changes in the speaker's voice. We recruited participants with the specific goal of working together to diffuse a simulated bomb. Half of our sample participated in an "Ice Breaker" scenario, during which they were allowed to converse and familiarize themselves with their teammate prior to the task, while the other half of the sample served as our "Control". Fundamental frequency (f0), peakSlope, physiological state, and subjective stress were measured during the task. Results indicated that f0 and peakSlope significantly increased from the beginning to the end of each task trial, and were highest in the last trial, which indicates an increase in emotional load and vocal tension. Finally, cardiovascular measures of stress indicated that the vocal and emotional load of speakers towards the end of the task mirrored a physiological state of psychological "threat".},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ringeval, Fabien; Schuller, Björn; Valstar, Michel; Gratch, Jonathan; Cowie, Roddy; Pantic, Maja
Summary for AVEC 2017: Real-life Depression and Affect Challenge and Workshop Proceedings Article
In: Proceedings of the 2017 ACM on Multimedia Conference, pp. 1963–1964, ACM Press, Mountain View, CA, 2017, ISBN: 978-1-4503-4906-2.
@inproceedings{ringeval_summary_2017,
title = {Summary for {AVEC} 2017: Real-life Depression and Affect Challenge and Workshop},
author = {Fabien Ringeval and Björn Schuller and Michel Valstar and Jonathan Gratch and Roddy Cowie and Maja Pantic},
url = {http://dl.acm.org/citation.cfm?doid=3123266.3132049},
doi = {10.1145/3123266.3132049},
isbn = {978-1-4503-4906-2},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 2017 ACM on Multimedia Conference},
pages = {1963--1964},
publisher = {ACM Press},
address = {Mountain View, CA},
abstract = {The seventh Audio-Visual Emotion Challenge and workshop AVEC 2017 was held in conjunction with ACM Multimedia'17. This year, the AVEC series addresses two distinct sub-challenges: emotion recognition and depression detection. The Affect Sub-Challenge is based on a novel dataset of human-human interactions recorded 'in-the-wild', whereas the Depression Sub-Challenge is based on the same dataset as the one used in AVEC 2016, with human-agent interactions. In this summary, we mainly describe participation and its conditions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Lucas, Gale; Gratch, Jonathan
To Tell the Truth: Virtual Agents and Morning Morality Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents, pp. 283–286, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{mozgai_tell_2017,
title = {To Tell the Truth: Virtual Agents and Morning Morality},
author = {Sharon Mozgai and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_37},
doi = {10.1007/978-3-319-67401-8_37},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents},
pages = {283--286},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {This paper investigates the impact of time of day on truthfulness in human-agent interactions. Time of day has been found to have important implications for moral behavior in human-human interaction. Namely, the morning morality effect shows that people are more likely to act ethically (i.e., tell fewer lies) in the morning than in the afternoon. Based on previous work on disclosure and virtual agents, we propose that this effect will not bear out in human-agent interactions. Preliminary evaluation shows that individuals who lie when engaged in multi-issue bargaining tasks with the Conflict Resolution Agent, a semi-automated virtual human, tell more lies to human negotiation partners than virtual agent negotiation partners in the afternoon and are more likely to tell more lies in the afternoon than in the morning when they believe they are negotiating with a human. Time of day does not have a significant effect on the amount of lies told to the virtual agent during the multi-issue bargaining task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.