Publications
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.; Busta, Kelly
Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities Journal Article
In: International Journal of Disaster Risk Reduction, vol. 118, pp. 105225, 2025, ISSN: 2212-4209.
@article{liu_impact_2025,
title = {Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2212420925000494},
doi = {10.1016/j.ijdrr.2025.105225},
issn = {2212-4209},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-20},
journal = {International Journal of Disaster Risk Reduction},
volume = {118},
pages = {105225},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Marti, Deniz; Budathoki, Anjila; Ding, Yi; Lucas, Gale; Nelson, David
How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations? Journal Article
In: International Journal of Human–Computer Interaction, pp. 1–12, 2024, ISSN: 1044-7318, 1532-7590.
@article{marti_how_2024,
title = {How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations?},
author = {Deniz Marti and Anjila Budathoki and Yi Ding and Gale Lucas and David Nelson},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2024.2426035},
doi = {10.1080/10447318.2024.2426035},
issn = {1044-7318, 1532-7590},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {International Journal of Human–Computer Interaction},
pages = {1–12},
keywords = {DTIC - access, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Bonial, Claire; Lukin, Stephanie M.; Abrams, Mitchell; Baker, Anthony; Donatelli, Lucia; Foots, Ashley; Hayes, Cory J.; Henry, Cassidy; Hudson, Taylor; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human–robot dialogue annotation for multi-modal common ground Journal Article
In: Language Resources and Evaluation, 2024, ISSN: 1574-020X, 1574-0218.
@article{bonial_humanrobot_2024,
title = {Human–robot dialogue annotation for multi-modal common ground},
author = {Claire Bonial and Stephanie M. Lukin and Mitchell Abrams and Anthony Baker and Lucia Donatelli and Ashley Foots and Cory J. Hayes and Cassidy Henry and Taylor Hudson and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {https://link.springer.com/10.1007/s10579-024-09784-2},
doi = {10.1007/s10579-024-09784-2},
issn = {1574-020X, 1574-0218},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {Language Resources and Evaluation},
keywords = {DTIC - access, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zaizar, Eric D.; Gramlich, Michael A.; Rizzo, Albert “Skip”; Reger, Greg M.; Norr, Aaron M.
Exploration of the impact of baseline clinician learner characteristics on motivational interviewing skill improvement following training with a virtual standardized patient Journal Article
In: Training and Education in Professional Psychology, 2024, ISSN: 1931-3926, 1931-3918.
@article{zaizar_exploration_2024,
title = {Exploration of the impact of baseline clinician learner characteristics on motivational interviewing skill improvement following training with a virtual standardized patient.},
author = {Eric D. Zaizar and Michael A. Gramlich and Albert “Skip” Rizzo and Greg M. Reger and Aaron M. Norr},
url = {https://doi.apa.org/doi/10.1037/tep0000490},
doi = {10.1037/tep0000490},
issn = {1931-3926, 1931-3918},
year = {2024},
date = {2024-08-01},
urldate = {2024-08-13},
journal = {Training and Education in Professional Psychology},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Fischer, Katrin; Velentza, Anna-Maria; Lucas, Gale; Williams, Dmitri
Seeing Eye to Eye with Robots: An Experimental Study Predicting Trust in Social Robots for Domestic Use Proceedings Article
In: 2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 2162–2168, IEEE, Pasadena, CA, USA, 2024, ISBN: 979-8-3503-7502-2.
@inproceedings{fischer_seeing_2024,
title = {Seeing Eye to Eye with Robots: An Experimental Study Predicting Trust in Social Robots for Domestic Use},
author = {Katrin Fischer and Anna-Maria Velentza and Gale Lucas and Dmitri Williams},
url = {https://ieeexplore.ieee.org/document/10731371/},
doi = {10.1109/RO-MAN60168.2024.10731371},
isbn = {979-8-3503-7502-2},
year = {2024},
date = {2024-08-01},
urldate = {2024-12-05},
booktitle = {2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {2162–2168},
publisher = {IEEE},
address = {Pasadena, CA, USA},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Han, Bin; Yau, Cleo; Lei, Su; Gratch, Jonathan
In-Depth Analysis of Emotion Recognition through Knowledge-Based Large Language Models Miscellaneous
2024, (arXiv:2408.00780 [cs]).
@misc{han_-depth_2024,
title = {In-Depth Analysis of Emotion Recognition through Knowledge-Based Large Language Models},
author = {Bin Han and Cleo Yau and Su Lei and Jonathan Gratch},
url = {http://arxiv.org/abs/2408.00780},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-15},
publisher = {arXiv},
abstract = {Emotion recognition in social situations is a complex task that requires integrating information from both facial expressions and the situational context. While traditional approaches to automatic emotion recognition have focused on decontextualized signals, recent research emphasizes the importance of context in shaping emotion perceptions. This paper contributes to the emerging field of context-based emotion recognition by leveraging psychological theories of human emotion perception to inform the design of automated methods. We propose an approach that combines emotion recognition methods with Bayesian Cue Integration (BCI) to integrate emotion inferences from decontextualized facial expressions and contextual knowledge inferred via Large-language Models. We test this approach in the context of interpreting facial expressions during a social task, the prisoner's dilemma. Our results provide clear support for BCI across a range of automatic emotion recognition methods. The best automated method achieved results comparable to human observers, suggesting the potential for this approach to advance the field of affective computing.},
note = {arXiv:2408.00780 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Al Owayyed, Mohammed; Tielman, Myrthe; Hartholt, Arno; Specht, Marcus; Brinkman, Willem-Paul
Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks Journal Article
In: Behaviour & Information Technology, pp. 1–28, 2024, ISSN: 0144-929X, 1362-3001.
@article{al_owayyed_agent-based_2024,
title = {Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks},
author = {Mohammed Al Owayyed and Myrthe Tielman and Arno Hartholt and Marcus Specht and Willem-Paul Brinkman},
url = {https://www.tandfonline.com/doi/full/10.1080/0144929X.2024.2374891},
doi = {10.1080/0144929X.2024.2374891},
issn = {0144-929X, 1362-3001},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-15},
journal = {Behaviour & Information Technology},
pages = {1–28},
keywords = {Virtual Agents, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Yin, Yinxuan; Nayyar, Mollik; Holman, Daniel; Lucas, Gale; Holbrook, Colin; Wagner, Alan
Validation and Evacuee Modeling of Virtual Robot-guided Emergency Evacuation Experiments Miscellaneous
2024.
@misc{yin_validation_2024,
title = {Validation and Evacuee Modeling of Virtual Robot-guided Emergency Evacuation Experiments},
author = {Yinxuan Yin and Mollik Nayyar and Daniel Holman and Gale Lucas and Colin Holbrook and Alan Wagner},
url = {https://osf.io/mr78s},
doi = {10.31234/osf.io/mr78s},
year = {2024},
date = {2024-06-01},
urldate = {2024-09-17},
publisher = {Center for Open Science},
abstract = {Virtual Reality (VR) is an increasingly common tool for investigating human responses to emergency situations. Nonetheless, studies validating and comparing human subject behavior during real world emergencies to their responses in VR are notably rare, and no prior studies have validated whether human emergency responses to guidance from a robot are comparable in VR versus the real world. In the present pre-registered study, we used VR to replicate a previous robot-guided emergency evacuation study conducted in the real world and compared human subject behavior in matched physical and virtual environments. In both environments, human subjects were asked to follow a robot to a location and to then read an article. While reading, a fire alarm sounds. The robot then attempted to guide them to a distant, unfamiliar exit rather than nearby and familiar exits. We observed close correspondences between evacuee exit choice (the robot’s distant exit versus closer exits), evacuation time, and trust in the robot between the VR and physical environments. We further demonstrate that data collected in virtual reality can be used to create accurate motion models (mean error of 0.42 centimeters) predicting evacuee trajectories and locations in real life. Taken together, the results provide evidence for the ecological validity of VR approaches to studying human-robot interaction, particularly robot-guided emergency evacuation.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Mozgai, Sharon A.; Kaurloto, Cari; Winn, Jade G.; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu, HI, USA, 2024, ISBN: 979-8-4007-0331-7.
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {979-8-4007-0331-7},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1–8},
publisher = {ACM},
address = {Honolulu, HI, USA},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Hao; Chang, Di; Li, Fang; Soleymani, Mohammad; Ahuja, Narendra
MagicPose4D: Crafting Articulated Models with Appearance and Motion Control Miscellaneous
2024, (Version Number: 1).
@misc{zhang_magicpose4d_2024,
title = {MagicPose4D: Crafting Articulated Models with Appearance and Motion Control},
author = {Hao Zhang and Di Chang and Fang Li and Mohammad Soleymani and Narendra Ahuja},
url = {https://arxiv.org/abs/2405.14017},
doi = {10.48550/ARXIV.2405.14017},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-25},
publisher = {arXiv},
abstract = {With the success of 2D and 3D visual generative models, there is growing interest in generating 4D content. Existing methods primarily rely on text prompts to produce 4D content, but they often fall short of accurately defining complex or rare motions. To address this limitation, we propose MagicPose4D, a novel framework for refined control over both appearance and motion in 4D generation. Unlike traditional methods, MagicPose4D accepts monocular videos as motion prompts, enabling precise and customizable motion generation. MagicPose4D comprises two key modules:
i) Dual-Phase 4D Reconstruction Module, which operates in two phases. The first phase focuses on capturing the model's shape using accurate 2D supervision and less accurate but geometrically informative 3D pseudo-supervision, without imposing skeleton constraints. The second phase refines the model using the more accurate pseudo-3D supervision obtained in the first phase and introduces kinematic chain-based skeleton constraints to ensure physical plausibility. Additionally, we propose a Global-local Chamfer loss that aligns the overall distribution of predicted mesh vertices with the supervision while maintaining part-level alignment without extra annotations.
ii) Cross-category Motion Transfer Module, which leverages the predictions from the 4D reconstruction module and uses a kinematic-chain-based skeleton to achieve cross-category motion transfer. It ensures smooth transitions between frames through dynamic rigidity, facilitating robust generalization without additional training.
Through extensive experiments, we demonstrate that MagicPose4D significantly improves the accuracy and consistency of 4D content generation, outperforming existing methods in various benchmarks.},
note = {Version Number: 1},
keywords = {VGL, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
West, Taylor Nicole; Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; Hale, James; Gratch, Jonathan; Fredrickson, Barbara
Improving Social Connection with Weak Ties and Strangers: Effects of a New Micro-Intervention on Interaction Quality and Social Behavior Miscellaneous
2024.
@misc{west_improving_2024,
title = {Improving Social Connection with Weak Ties and Strangers: Effects of a New Micro-Intervention on Interaction Quality and Social Behavior},
author = {Taylor Nicole West and Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and James Hale and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/ytjr6},
doi = {10.31234/osf.io/ytjr6},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-25},
abstract = {We propose that the emotional quality of people’s interactions with acquaintances (i.e., weak ties) and strangers contributes to well-being. We test whether a new micro-intervention can raise the quality of these interactions. We randomized young adults (N = 335) to this connectedness micro-intervention or a control intervention. Both interventions were delivered via a psychoeducational video followed by a brief conversation with a virtual human, with whom participants developed if-then plans to carry out their assigned behavioral goal. Pre-intervention, high-quality weak-tie and stranger interactions were associated with lower loneliness and greater mental health independent of strong-tie interaction quality. Experimental data showed the connectedness intervention improved the emotional quality of participants' interactions with weak ties and strangers over two days, evident in participants’ episodic self-reports and faster in-lab conversational response time. Discussion centers on implications for developing scalable behavioral interventions to improve well-being.},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling & simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration & Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
@misc{tran_dyadic_2024,
title = {Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.09069},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-19},
publisher = {arXiv},
abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work considers a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
note = {arXiv:2403.09069 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Lu, Liupei; Yin, Yufeng; Gu, Yuming; Wu, Yizhen; Prasad, Pratusha; Zhao, Yajie; Soleymani, Mohammad
Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection Miscellaneous
2024, (arXiv:2403.10737 [cs]).
@misc{lu_leveraging_2024,
title = {Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection},
author = {Liupei Lu and Yufeng Yin and Yuming Gu and Yizhen Wu and Pratusha Prasad and Yajie Zhao and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.10737},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
publisher = {arXiv},
abstract = {Facial action unit (AU) detection is a fundamental block for objective facial expression analysis. Supervised learning approaches require a large amount of manual labeling which is costly. The limited labeled data are also not diverse in terms of gender which can affect model fairness. In this paper, we propose to use synthetically generated data and multi-source domain adaptation (MSDA) to address the problems of the scarcity of labeled data and the diversity of subjects. Specifically, we propose to generate a diverse dataset through synthetic facial expression re-targeting by transferring the expressions from real faces to synthetic avatars. Then, we use MSDA to transfer the AU detection knowledge from a real dataset and the synthetic dataset to a target dataset. Instead of aligning the overall distributions of different domains, we propose Paired Moment Matching (PM2) to align the features of the paired real and synthetic data with the same facial expression. To further improve gender fairness, PM2 matches the features of the real data with a female and a male synthetic image. Our results indicate that synthetic data and the proposed model improve both AU detection performance and fairness across genders, demonstrating its potential to solve AU detection in-the-wild.},
note = {arXiv:2403.10737 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Kwon, Deuksin; Weiss, Emily; Kulshrestha, Tara; Chawla, Kushal; Lucas, Gale M.; Gratch, Jonathan
Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues Miscellaneous
2024, (arXiv:2402.13550 [cs]).
@misc{kwon_are_2024,
title = {Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues},
author = {Deuksin Kwon and Emily Weiss and Tara Kulshrestha and Kushal Chawla and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2402.13550},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
publisher = {arXiv},
abstract = {A successful negotiation demands a deep comprehension of the conversation context, Theory-of-Mind (ToM) skills to infer the partner's motives, as well as strategic reasoning and effective communication, making it challenging for automated systems. Given the remarkable performance of LLMs across a variety of NLP tasks, in this work, we aim to understand how LLMs can advance different aspects of negotiation research, ranging from designing dialogue systems to providing pedagogical feedback and scaling up data collection practices. To this end, we devise a methodology to analyze the multifaceted capabilities of LLMs across diverse dialogue scenarios covering all the time stages of a typical negotiation interaction. Our analysis adds to the increasing evidence for the superiority of GPT-4 across various tasks while also providing insights into specific tasks that remain difficult for LLMs. For instance, the models correlate poorly with human players when making subjective assessments about the negotiation dialogues and often struggle to generate responses that are contextually appropriate as well as strategically advantageous.},
note = {arXiv:2402.13550 [cs]},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Murawski, Alaine; Ramirez‐Zohfeld, Vanessa; Mell, Johnathan; Tschoe, Marianne; Schierer, Allison; Olvera, Charles; Brett, Jeanne; Gratch, Jonathan; Lindquist, Lee A.
Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program Journal Article
In: Journal of the American Geriatrics Society, pp. jgs.18775, 2024, ISSN: 0002-8614, 1532-5415.
@article{murawski_development_2024,
title = {Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program},
author = {Alaine Murawski and Vanessa Ramirez‐Zohfeld and Johnathan Mell and Marianne Tschoe and Allison Schierer and Charles Olvera and Jeanne Brett and Jonathan Gratch and Lee A. Lindquist},
url = {https://agsjournals.onlinelibrary.wiley.com/doi/10.1111/jgs.18775},
doi = {10.1111/jgs.18775},
issn = {0002-8614, 1532-5415},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of the American Geriatrics Society},
pages = {jgs.18775},
abstract = {Background
Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial‐intelligence negotiation training program, NegotiAge, for family caregivers.
Methods
We convened negotiation experts, a geriatrician, a social worker, and community‐based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar‐based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open‐ended questions with thematic analysis).
Results
Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real‐time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreement were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend.
Conclusion
NegotiAge is an artificial‐intelligence caregiver negotiation program that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Greene, Gretchen; Picard, Rosalind; Urquhart, Lachlan; Valstar, Michel
Guest Editorial: Ethics in Affective Computing Journal Article
In: IEEE Transactions on Affective Computing, vol. 15, no. 1, pp. 1–3, 2024, ISSN: 1949-3045, 2371-9850.
@article{gratch_guest_2024,
title = {Guest Editorial: Ethics in Affective Computing},
author = {Jonathan Gratch and Gretchen Greene and Rosalind Picard and Lachlan Urquhart and Michel Valstar},
url = {https://ieeexplore.ieee.org/document/10454111/},
doi = {10.1109/TAFFC.2023.3322918},
issn = {1949-3045, 2371-9850},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-14},
journal = {IEEE Transactions on Affective Computing},
volume = {15},
number = {1},
pages = {1–3},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shi, Zhonghao; O'Connell, Allison; Li, Zongjian; Liu, Siqi; Ayissi, Jennifer; Hoffman, Guy; Soleymani, Mohammad; Matarić, Maja J.
Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education Miscellaneous
2024, (arXiv:2402.01647 [cs]).
@misc{shi_build_2024,
title = {Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education},
author = {Zhonghao Shi and Allison O'Connell and Zongjian Li and Siqi Liu and Jennifer Ayissi and Guy Hoffman and Mohammad Soleymani and Maja J. Matarić},
url = {http://arxiv.org/abs/2402.01647},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {As artificial intelligence (AI) is playing an increasingly important role in our society and global economy, AI education and literacy have become necessary components in college and K-12 education to prepare students for an AI-powered society. However, current AI curricula have not yet been made accessible and engaging enough for students and schools from all socio-economic backgrounds with different educational goals. In this work, we developed an open-source learning module for college and high school students, which allows students to build their own robot companion from the ground up. This open platform can be used to provide hands-on experience and introductory knowledge about various aspects of AI, including robotics, machine learning (ML), software engineering, and mechanical engineering. Because of the social and personal nature of a socially assistive robot companion, this module also puts a special emphasis on human-centered AI, enabling students to develop a better understanding of human-AI interaction and AI ethics through hands-on learning activities. With open-source documentation, assembling manuals and affordable materials, students from different socio-economic backgrounds can personalize their learning experience based on their individual educational goals. To evaluate the student-perceived quality of our module, we conducted a usability testing workshop with 15 college students recruited from a minority-serving institution. Our results indicate that our AI module is effective, easy-to-follow, and engaging, and it increases student interest in studying AI/ML and robotics in the future. We hope that this work will contribute toward accessible and engaging AI education in human-AI interaction for college and high school students.},
note = {arXiv:2402.01647 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Jago, Arthur S.; Raveendhran, Roshni; Fast, Nathanael; Gratch, Jonathan
Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles Journal Article
In: Journal of Experimental Social Psychology, vol. 110, pp. 104553, 2024, ISSN: 0022-1031.
@article{jago_algorithmic_2024,
title = {Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles},
author = {Arthur S. Jago and Roshni Raveendhran and Nathanael Fast and Jonathan Gratch},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0022103123001105},
doi = {10.1016/j.jesp.2023.104553},
issn = {0022-1031},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of Experimental Social Psychology},
volume = {110},
pages = {104553},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rodrigues, Patrick B.; Becerik-Gerber, Burcin; Soibelman, Lucio; Lucas, Gale M.; Roll, Shawn C.
Virtual Environment for Studying the Effects of Operational and Environmental Sounds on Teleoperated Demolition Proceedings Article
In: Computing in Civil Engineering 2023, pp. 54–61, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8523-1.
@inproceedings{rodrigues_virtual_2024,
title = {Virtual Environment for Studying the Effects of Operational and Environmental Sounds on Teleoperated Demolition},
author = {Patrick B. Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Gale M. Lucas and Shawn C. Roll},
url = {https://ascelibrary.org/doi/10.1061/9780784485231.007},
doi = {10.1061/9780784485231.007},
isbn = {978-0-7844-8523-1},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {54–61},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2025
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.; Busta, Kelly
Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities Journal Article
In: International Journal of Disaster Risk Reduction, vol. 118, pp. 105225, 2025, ISSN: 22124209.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{liu_impact_2025,
title = {Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2212420925000494},
doi = {10.1016/j.ijdrr.2025.105225},
issn = {22124209},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-20},
journal = {International Journal of Disaster Risk Reduction},
volume = {118},
pages = {105225},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2024
Marti, Deniz; Budathoki, Anjila; Ding, Yi; Lucas, Gale; Nelson, David
How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations? Journal Article
In: International Journal of Human–Computer Interaction, pp. 1–12, 2024, ISSN: 1044-7318, 1532-7590.
Links | BibTeX | Tags: DTIC - access, Virtual Humans
@article{marti_how_2024,
title = {How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations?},
author = {Deniz Marti and Anjila Budathoki and Yi Ding and Gale Lucas and David Nelson},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2024.2426035},
doi = {10.1080/10447318.2024.2426035},
issn = {1044-7318, 1532-7590},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {International Journal of Human–Computer Interaction},
pages = {1–12},
keywords = {DTIC - access, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Bonial, Claire; Lukin, Stephanie M.; Abrams, Mitchell; Baker, Anthony; Donatelli, Lucia; Foots, Ashley; Hayes, Cory J.; Henry, Cassidy; Hudson, Taylor; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human–robot dialogue annotation for multi-modal common ground Journal Article
In: Lang Resources & Evaluation, 2024, ISSN: 1574-020X, 1574-0218.
Links | BibTeX | Tags: DTIC - access, Virtual Humans
@article{bonial_humanrobot_2024,
title = {Human–robot dialogue annotation for multi-modal common ground},
author = {Claire Bonial and Stephanie M. Lukin and Mitchell Abrams and Anthony Baker and Lucia Donatelli and Ashley Foots and Cory J. Hayes and Cassidy Henry and Taylor Hudson and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {https://link.springer.com/10.1007/s10579-024-09784-2},
doi = {10.1007/s10579-024-09784-2},
issn = {1574-020X, 1574-0218},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {Lang Resources & Evaluation},
keywords = {DTIC - access, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zaizar, Eric D.; Gramlich, Michael A.; Rizzo, Albert “Skip”; Reger, Greg M.; Norr, Aaron M.
In: Training and Education in Professional Psychology, 2024, ISSN: 1931-3926, 1931-3918.
Links | BibTeX | Tags: Virtual Humans
@article{zaizar_exploration_2024,
title = {Exploration of the impact of baseline clinician learner characteristics on motivational interviewing skill improvement following training with a virtual standardized patient.},
author = {Eric D. Zaizar and Michael A. Gramlich and Albert “Skip” Rizzo and Greg M. Reger and Aaron M. Norr},
url = {https://doi.apa.org/doi/10.1037/tep0000490},
doi = {10.1037/tep0000490},
issn = {1931-3926, 1931-3918},
year = {2024},
date = {2024-08-01},
urldate = {2024-08-13},
journal = {Training and Education in Professional Psychology},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Fischer, Katrin; Velentza, Anna-Maria; Lucas, Gale; Williams, Dmitri
Seeing Eye to Eye with Robots: An Experimental Study Predicting Trust in Social Robots for Domestic Use Proceedings Article
In: 2024 33rd IEEE International Conference on Robot and Human Interactive Communication (ROMAN), pp. 2162–2168, IEEE, Pasadena, CA, USA, 2024, ISBN: 979-8-3503-7502-2.
Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{fischer_seeing_2024,
title = {Seeing Eye to Eye with Robots: An Experimental Study Predicting Trust in Social Robots for Domestic Use},
author = {Katrin Fischer and Anna-Maria Velentza and Gale Lucas and Dmitri Williams},
url = {https://ieeexplore.ieee.org/document/10731371/},
doi = {10.1109/RO-MAN60168.2024.10731371},
isbn = {979-8-3503-7502-2},
year = {2024},
date = {2024-08-01},
urldate = {2024-12-05},
booktitle = {2024 33rd IEEE International Conference on Robot and Human Interactive Communication (ROMAN)},
pages = {2162–2168},
publisher = {IEEE},
address = {Pasadena, CA, USA},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Han, Bin; Yau, Cleo; Lei, Su; Gratch, Jonathan
In-Depth Analysis of Emotion Recognition through Knowledge-Based Large Language Models Miscellaneous
2024, (arXiv:2408.00780 [cs]).
Abstract | Links | BibTeX | Tags: Virtual Humans
@misc{han_-depth_2024,
title = {In-Depth Analysis of Emotion Recognition through Knowledge-Based Large Language Models},
author = {Bin Han and Cleo Yau and Su Lei and Jonathan Gratch},
url = {http://arxiv.org/abs/2408.00780},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-15},
publisher = {arXiv},
abstract = {Emotion recognition in social situations is a complex task that requires integrating information from both facial expressions and the situational context. While traditional approaches to automatic emotion recognition have focused on decontextualized signals, recent research emphasizes the importance of context in shaping emotion perceptions. This paper contributes to the emerging field of context-based emotion recognition by leveraging psychological theories of human emotion perception to inform the design of automated methods. We propose an approach that combines emotion recognition methods with Bayesian Cue Integration (BCI) to integrate emotion inferences from decontextualized facial expressions and contextual knowledge inferred via Large-language Models. We test this approach in the context of interpreting facial expressions during a social task, the prisoner's dilemma. Our results provide clear support for BCI across a range of automatic emotion recognition methods. The best automated method achieved results comparable to human observers, suggesting the potential for this approach to advance the field of affective computing.},
note = {arXiv:2408.00780 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Owayyed, Mohammed Al; Tielman, Myrthe; Hartholt, Arno; Specht, Marcus; Brinkman, Willem-Paul
Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks Journal Article
In: Behaviour & Information Technology, pp. 1–28, 2024, ISSN: 0144-929X, 1362-3001.
Links | BibTeX | Tags: Virtual Agents, Virtual Humans
@article{al_owayyed_agent-based_2024,
title = {Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks},
author = {Mohammed Al Owayyed and Myrthe Tielman and Arno Hartholt and Marcus Specht and Willem-Paul Brinkman},
url = {https://www.tandfonline.com/doi/full/10.1080/0144929X.2024.2374891},
doi = {10.1080/0144929X.2024.2374891},
issn = {0144-929X, 1362-3001},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-15},
journal = {Behaviour & Information Technology},
pages = {1–28},
keywords = {Virtual Agents, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Yin, Yinxuan; Nayyar, Mollik; Holman, Daniel; Lucas, Gale; Holbrook, Colin; Wagner, Alan
Validation and Evacuee Modeling of Virtual Robot-guided Emergency Evacuation Experiments Miscellaneous
2024.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@misc{yin_validation_2024,
title = {Validation and Evacuee Modeling of Virtual Robot-guided Emergency Evacuation Experiments},
author = {Yinxuan Yin and Mollik Nayyar and Daniel Holman and Gale Lucas and Colin Holbrook and Alan Wagner},
url = {https://osf.io/mr78s},
doi = {10.31234/osf.io/mr78s},
year = {2024},
date = {2024-06-01},
urldate = {2024-09-17},
publisher = {Center for Open Science},
abstract = {Virtual Reality (VR) is an increasingly common tool for investigating human responses to emergency situations. Nonetheless, studies validating and comparing human subject behavior during real world emergencies to their responses in VR are notably rare, and no prior studies have validated whether human emergency responses to guidance from a robot are comparable in VR versus the real world. In the present pre-registered study, we used VR to replicate a previous robot- guided emergency evacuation study conducted in the real world and compared human subject behavior in matched physical and virtual environments. In both environments, human subjects were asked to follow a robot to a location and to then read an article. While reading, a fire alarm sounds. The robot then attempted to guide them to a distant, unfamiliar exit rather than nearby and familiar exits. We observed close correspondences between evacuee exit choice (the robot’s distant exit versus closer exits), evacuation time, and trust in the robot between the VR and physical environments. We further demonstrate that data collected in virtual reality can be used to create accurate motion models (mean error of 0.42 centimeters) predicting evacuee trajectories and locations in real life. Taken together, the results provide evidence for the ecological validity of VR approaches to studying human-robot interaction, particularly robot- guided emergency evacuation.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu HI USA, 2024, ISBN: 979-8-4007-0331-7.
Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {979-8-4007-0331-7},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1–8},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Hao; Chang, Di; Li, Fang; Soleymani, Mohammad; Ahuja, Narendra
MagicPose4D: Crafting Articulated Models with Appearance and Motion Control Miscellaneous
2024, (Version Number: 1).
Abstract | Links | BibTeX | Tags: VGL, Virtual Humans
@misc{zhang_magicpose4d_2024,
title = {MagicPose4D: Crafting Articulated Models with Appearance and Motion Control},
author = {Hao Zhang and Di Chang and Fang Li and Mohammad Soleymani and Narendra Ahuja},
url = {https://arxiv.org/abs/2405.14017},
doi = {10.48550/ARXIV.2405.14017},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-25},
publisher = {arXiv},
abstract = {With the success of 2D and 3D visual generative models, there is growing interest in generating 4D content. Existing methods primarily rely on text prompts to produce 4D content, but they often fall short of accurately defining complex or rare motions. To address this limitation, we propose MagicPose4D, a novel framework for refined control over both appearance and motion in 4D generation. Unlike traditional methods, MagicPose4D accepts monocular videos as motion prompts, enabling precise and customizable motion generation. MagicPose4D comprises two key modules:
i) Dual-Phase 4D Reconstruction Modulevphantom which operates in two phases. The first phase focuses on capturing the model's shape using accurate 2D supervision and less accurate but geometrically informative 3D pseudo-supervision without imposing skeleton constraints. The second phase refines the model using more accurate pseudo-3D supervision, obtained in the first phase and introduces kinematic chain-based skeleton constraints to ensure physical plausibility. Additionally, we propose a Global-local Chamfer loss that aligns the overall distribution of predicted mesh vertices with the supervision while maintaining part-level alignment without extra annotations.
ii) Cross-category Motion Transfer Modulevphantom leverages the predictions from the 4D reconstruction module and uses a kinematic-chain-based skeleton to achieve cross-category motion transfer. It ensures smooth transitions between frames through dynamic rigidity, facilitating robust generalization without additional training.
Through extensive experiments, we demonstrate that MagicPose4D significantly improves the accuracy and consistency of 4D content generation, outperforming existing methods in various benchmarks.},
note = {Version Number: 1},
keywords = {VGL, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
i) Dual-Phase 4D Reconstruction Modulevphantom which operates in two phases. The first phase focuses on capturing the model's shape using accurate 2D supervision and less accurate but geometrically informative 3D pseudo-supervision without imposing skeleton constraints. The second phase refines the model using more accurate pseudo-3D supervision, obtained in the first phase and introduces kinematic chain-based skeleton constraints to ensure physical plausibility. Additionally, we propose a Global-local Chamfer loss that aligns the overall distribution of predicted mesh vertices with the supervision while maintaining part-level alignment without extra annotations.
ii) Cross-category Motion Transfer Modulevphantom leverages the predictions from the 4D reconstruction module and uses a kinematic-chain-based skeleton to achieve cross-category motion transfer. It ensures smooth transitions between frames through dynamic rigidity, facilitating robust generalization without additional training.
Through extensive experiments, we demonstrate that MagicPose4D significantly improves the accuracy and consistency of 4D content generation, outperforming existing methods in various benchmarks.
West, Taylor Nicole; Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; Hale, James; Gratch, Jonathan; Fredrickson, Barbara
2024.
Abstract | Links | BibTeX | Tags: Emotions, Virtual Humans
@misc{west_improving_2024,
title = {Improving Social Connection with Weak Ties and Strangers: Effects of a New Micro-Intervention on Interaction Quality and Social Behavior},
author = {Taylor Nicole West and Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and James Hale and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/ytjr6},
doi = {10.31234/osf.io/ytjr6},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-25},
abstract = {We propose that the emotional quality of people’s interactions with acquaintances (i.e., weak ties) and strangers contributes to well-being. We test whether a new micro-intervention can raise the quality of these interactions. We randomized young adults (N = 335) to this connectedness micro-intervention or a control intervention. Both interventions were delivered via a psychoeducational video followed by a brief conversation with a virtual human, with whom participants developed if-then plans to carry out their assigned behavioral goal. Pre-intervention, high-quality weak-tie and stranger interactions were associated with lower loneliness and greater mental health independent of strong-tie interaction quality. Experimental data showed the connectedness intervention improved the emotional quality of participants' interactions with weak ties and strangers over two days, evident in participants’ episodic self-reports and faster in-lab conversational response time. Discussion centers on implications for developing scalable behavioral interventions to improve well-being.},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling & simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration & Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{tran_dyadic_2024,
title = {Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.09069},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-19},
publisher = {arXiv},
abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work consider a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations, through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
note = {arXiv:2403.09069 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Lu, Liupei; Yin, Yufeng; Gu, Yuming; Wu, Yizhen; Prasad, Pratusha; Zhao, Yajie; Soleymani, Mohammad
Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection Miscellaneous
2024, (arXiv:2403.10737 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{lu_leveraging_2024,
title = {Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection},
author = {Liupei Lu and Yufeng Yin and Yuming Gu and Yizhen Wu and Pratusha Prasad and Yajie Zhao and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.10737},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
publisher = {arXiv},
abstract = {Facial action unit (AU) detection is a fundamental block for objective facial expression analysis. Supervised learning approaches require a large amount of manual labeling which is costly. The limited labeled data are also not diverse in terms of gender which can affect model fairness. In this paper, we propose to use synthetically generated data and multi-source domain adaptation (MSDA) to address the problems of the scarcity of labeled data and the diversity of subjects. Specifically, we propose to generate a diverse dataset through synthetic facial expression re-targeting by transferring the expressions from real faces to synthetic avatars. Then, we use MSDA to transfer the AU detection knowledge from a real dataset and the synthetic dataset to a target dataset. Instead of aligning the overall distributions of different domains, we propose Paired Moment Matching (PM2) to align the features of the paired real and synthetic data with the same facial expression. To further improve gender fairness, PM2 matches the features of the real data with a female and a male synthetic image. Our results indicate that synthetic data and the proposed model improve both AU detection performance and fairness across genders, demonstrating its potential to solve AU detection in-the-wild.},
note = {arXiv:2403.10737 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
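The paired alignment idea above can be sketched in a few lines. The toy loss below is one reading of the abstract, not the authors' implementation; the tensor names are invented. It pulls each real face's features toward its paired female and male synthetic renderings of the same expression.

import torch

def pm2_loss(f_real, f_syn_female, f_syn_male):
    # f_*: (batch, dim) features; row i of each tensor is one real/synthetic
    # pair sharing the same facial expression.
    loss_female = (f_real - f_syn_female).pow(2).sum(dim=1).mean()
    loss_male = (f_real - f_syn_male).pow(2).sum(dim=1).mean()
    # averaging the two terms ties the real features to both gendered
    # counterparts, the mechanism the abstract credits for fairness gains
    return 0.5 * (loss_female + loss_male)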
Kwon, Deuksin; Weiss, Emily; Kulshrestha, Tara; Chawla, Kushal; Lucas, Gale M.; Gratch, Jonathan
Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues Miscellaneous
2024, (arXiv:2402.13550 [cs]).
Abstract | Links | BibTeX | Tags: AI, Virtual Humans
@misc{kwon_are_2024,
title = {Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues},
author = {Deuksin Kwon and Emily Weiss and Tara Kulshrestha and Kushal Chawla and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2402.13550},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
publisher = {arXiv},
abstract = {A successful negotiation demands a deep comprehension of the conversation context, Theory-of-Mind (ToM) skills to infer the partner's motives, as well as strategic reasoning and effective communication, making it challenging for automated systems. Given the remarkable performance of LLMs across a variety of NLP tasks, in this work, we aim to understand how LLMs can advance different aspects of negotiation research, ranging from designing dialogue systems to providing pedagogical feedback and scaling up data collection practices. To this end, we devise a methodology to analyze the multifaceted capabilities of LLMs across diverse dialogue scenarios covering all the time stages of a typical negotiation interaction. Our analysis adds to the increasing evidence for the superiority of GPT-4 across various tasks while also providing insights into specific tasks that remain difficult for LLMs. For instance, the models correlate poorly with human players when making subjective assessments about the negotiation dialogues and often struggle to generate responses that are contextually appropriate as well as strategically advantageous.},
note = {arXiv:2402.13550 [cs]},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Murawski, Alaine; Ramirez‐Zohfeld, Vanessa; Mell, Johnathan; Tschoe, Marianne; Schierer, Allison; Olvera, Charles; Brett, Jeanne; Gratch, Jonathan; Lindquist, Lee A.
Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program Journal Article
In: J American Geriatrics Society, pp. jgs.18775, 2024, ISSN: 0002-8614, 1532-5415.
Abstract | Links | BibTeX | Tags: AI, Virtual Humans
@article{murawski_development_2024,
title = {Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program},
author = {Alaine Murawski and Vanessa Ramirez‐Zohfeld and Johnathan Mell and Marianne Tschoe and Allison Schierer and Charles Olvera and Jeanne Brett and Jonathan Gratch and Lee A. Lindquist},
url = {https://agsjournals.onlinelibrary.wiley.com/doi/10.1111/jgs.18775},
doi = {10.1111/jgs.18775},
issn = {0002-8614, 1532-5415},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {J American Geriatrics Society},
pages = {jgs.18775},
abstract = {Background: Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial-intelligence negotiation training program, NegotiAge, for family caregivers. Methods: We convened negotiation experts, a geriatrician, a social worker, and community-based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar-based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open-ended questions with thematic analysis). Results: Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real-time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreement were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend. Conclusion: NegotiAge is an artificial-intelligence caregiver negotiation program that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Greene, Gretchen; Picard, Rosalind; Urquhart, Lachlan; Valstar, Michel
Guest Editorial: Ethics in Affective Computing Journal Article
In: IEEE Trans. Affective Comput., vol. 15, no. 1, pp. 1–3, 2024, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: Virtual Humans
@article{gratch_guest_2024,
title = {Guest Editorial: Ethics in Affective Computing},
author = {Jonathan Gratch and Gretchen Greene and Rosalind Picard and Lachlan Urquhart and Michel Valstar},
url = {https://ieeexplore.ieee.org/document/10454111/},
doi = {10.1109/TAFFC.2023.3322918},
issn = {1949-3045, 2371-9850},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-14},
journal = {IEEE Trans. Affective Comput.},
volume = {15},
number = {1},
pages = {1–3},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Shi, Zhonghao; O'Connell, Allison; Li, Zongjian; Liu, Siqi; Ayissi, Jennifer; Hoffman, Guy; Soleymani, Mohammad; Matarić, Maja J.
Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education Miscellaneous
2024, (arXiv:2402.01647 [cs]).
Abstract | Links | BibTeX | Tags: Virtual Humans
@misc{shi_build_2024,
title = {Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education},
author = {Zhonghao Shi and Allison O'Connell and Zongjian Li and Siqi Liu and Jennifer Ayissi and Guy Hoffman and Mohammad Soleymani and Maja J. Matarić},
url = {http://arxiv.org/abs/2402.01647},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {As artificial intelligence (AI) is playing an increasingly important role in our society and global economy, AI education and literacy have become necessary components in college and K-12 education to prepare students for an AI-powered society. However, current AI curricula have not yet been made accessible and engaging enough for students and schools from all socio-economic backgrounds with different educational goals. In this work, we developed an open-source learning module for college and high school students, which allows students to build their own robot companion from the ground up. This open platform can be used to provide hands-on experience and introductory knowledge about various aspects of AI, including robotics, machine learning (ML), software engineering, and mechanical engineering. Because of the social and personal nature of a socially assistive robot companion, this module also puts a special emphasis on human-centered AI, enabling students to develop a better understanding of human-AI interaction and AI ethics through hands-on learning activities. With open-source documentation, assembly manuals and affordable materials, students from different socio-economic backgrounds can personalize their learning experience based on their individual educational goals. To evaluate the student-perceived quality of our module, we conducted a usability testing workshop with 15 college students recruited from a minority-serving institution. Our results indicate that our AI module is effective, easy-to-follow, and engaging, and it increases student interest in studying AI/ML and robotics in the future. We hope that this work will contribute toward accessible and engaging AI education in human-AI interaction for college and high school students.},
note = {arXiv:2402.01647 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Jago, Arthur S.; Raveendhran, Roshni; Fast, Nathanael; Gratch, Jonathan
Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles Journal Article
In: Journal of Experimental Social Psychology, vol. 110, pp. 104553, 2024, ISSN: 00221031.
Links | BibTeX | Tags: Virtual Humans
@article{jago_algorithmic_2024,
title = {Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles},
author = {Arthur S. Jago and Roshni Raveendhran and Nathanael Fast and Jonathan Gratch},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0022103123001105},
doi = {10.1016/j.jesp.2023.104553},
issn = {00221031},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of Experimental Social Psychology},
volume = {110},
pages = {104553},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rodrigues, Patrick B.; Becerik-Gerber, Burcin; Soibelman, Lucio; Lucas, Gale M.; Roll, Shawn C.
Virtual Environment for Studying the Effects of Operational and Environmental Sounds on Teleoperated Demolition Proceedings Article
In: Computing in Civil Engineering 2023, pp. 54–61, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8523-1.
Links | BibTeX | Tags: DTIC, Virtual Humans, VR
@inproceedings{rodrigues_virtual_2024,
title = {Virtual Environment for Studying the Effects of Operational and Environmental Sounds on Teleoperated Demolition},
author = {Patrick B. Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Gale M. Lucas and Shawn C. Roll},
url = {https://ascelibrary.org/doi/10.1061/9780784485231.007},
doi = {10.1061/9780784485231.007},
isbn = {978-0-7844-8523-1},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {54–61},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {DTIC, Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ruying; Becerik-Gerber, Burçin; Lucas, Gale M.; Busta, Kelly
Development of a VR Training Platform for Active Shooter Incident Preparedness in Healthcare Environments via a Stakeholder-Engaged Process Proceedings Article
In: Computing in Civil Engineering 2023, pp. 45–53, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8523-1.
Links | BibTeX | Tags: Virtual Humans, VR
@inproceedings{liu_development_2024,
title = {Development of a VR Training Platform for Active Shooter Incident Preparedness in Healthcare Environments via a Stakeholder-Engaged Process},
author = {Ruying Liu and Burçin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://ascelibrary.org/doi/10.1061/9780784485231.006},
doi = {10.1061/9780784485231.006},
isbn = {978-0-7844-8523-1},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {45–53},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Seyedrezaei, Mirmahdi; Becerik-Gerber, Burcin; Lucas, Gale
Investigating the Interplay between Indoor Environmental Quality and Workers’ Health and Productivity: Preliminary Results Proceedings Article
In: Computing in Civil Engineering 2023, pp. 614–622, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8524-8.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{awada_investigating_2024,
title = {Investigating the Interplay between Indoor Environmental Quality and Workers’ Health and Productivity: Preliminary Results},
author = {Mohamad Awada and Mirmahdi Seyedrezaei and Burcin Becerik-Gerber and Gale Lucas},
url = {https://ascelibrary.org/doi/10.1061/9780784485248.074},
doi = {10.1061/9780784485248.074},
isbn = {978-0-7844-8524-8},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {614–622},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {03601323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
Abstract | Links | BibTeX | Tags: AI, Dialogue, DTIC, UARC, Virtual Humans
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier to aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to assess models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {AI, Dialogue, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
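The context-synthesis step described above amounts to prompting an LLM to invent plausible surroundings for a bare utterance before re-judging its emotion label. A hypothetical prompt builder follows; the wording is invented for illustration and is not taken from the paper.

def build_context_prompt(utterance: str) -> str:
    # illustrative prompt wording; any chat-completion API can consume it
    return (
        "The following sentence was posted online without any context:\n"
        f'  "{utterance}"\n'
        "Write a brief, plausible paragraph of surrounding context "
        "(who is speaking, to whom, and what just happened), then restate "
        "the sentence so its emotional content can be judged."
    )

# e.g. send build_context_prompt("Great, just great.") to an LLM and append
# the response to the input text before auditing its emotion label.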
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{chang_magicdance_2023,
title = {MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {02724944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC, Virtual Humans
@article{awada_predicting_2023,
title = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://www.mdpi.com/1424-8220/23/21/8694},
doi = {10.3390/s23218694},
issn = {1424-8220},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
journal = {Sensors},
volume = {23},
number = {21},
pages = {8694},
abstract = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
keywords = {DTIC, Machine Learning, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
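The baseline-versus-extended structure in this abstract can be approximated with the XGBoost sklearn API, as in the toy sketch below: the extended regressor additionally consumes a predicted psychological state. The data is random, the feature semantics are invented, and the metrics are in-sample only; none of this reproduces the study.

import numpy as np
from xgboost import XGBClassifier, XGBRegressor
from sklearn.metrics import mean_absolute_error, r2_score

rng = np.random.default_rng(0)
X_phys = rng.normal(size=(500, 8))          # stand-ins for skin temp, EDA, etc.
y_mood = (X_phys[:, 0] > 0).astype(int)     # stand-in psychological state
y_prod = 50 + 10 * X_phys[:, 0] + rng.normal(scale=5, size=500)  # productivity

# baseline: physiological/behavioral features only
baseline = XGBRegressor(n_estimators=100).fit(X_phys, y_prod)

# extended: augment the features with a *predicted* psychological state
mood_clf = XGBClassifier(n_estimators=100).fit(X_phys, y_mood)
X_ext = np.column_stack([X_phys, mood_clf.predict_proba(X_phys)[:, 1]])
extended = XGBRegressor(n_estimators=100).fit(X_ext, y_prod)

for name, model, X in [("baseline", baseline, X_phys), ("extended", extended, X_ext)]:
    pred = model.predict(X)   # in-sample metrics, for illustration only
    print(f"{name}: R2={r2_score(y_prod, pred):.2f} MAE={mean_absolute_error(y_prod, pred):.2f}")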
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, DTIC, UARC, Virtual Humans
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {Dialogue, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
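The remedy this abstract describes can be read as reward shaping in self-play; a hedged one-function sketch of the intuition (not the authors' code) follows.

def shaped_reward(own_points: float, agreement_reached: bool,
                  walkaway_penalty: float = 10.0) -> float:
    # the agent stays selfish (own points only) but an explicit walkaway
    # penalty keeps compromise valuable; penalty size is an assumption
    return own_points if agreement_reached else -walkaway_penalty

# shaped_reward(6.0, True) -> 6.0; shaped_reward(6.0, False) -> -10.0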
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
Abstract | Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@techreport{prinzing_can_2023,
title = {Can AI Agents Help Humans to Connect?},
author = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/muq6s},
doi = {10.31234/osf.io/muq6s},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
institution = {PsyArXiv},
abstract = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545–550},
publisher = {ACM},
address = {Washington DC USA},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
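A rough sketch of the anonymization mapping described above, assuming utterance-level embeddings and a learnable target-speaker code; the architecture details, dimensions, and names are invented, not taken from the paper.

import torch
import torch.nn as nn

class EmbeddingAnonymizer(nn.Module):
    def __init__(self, emb_dim=768, num_speakers=100, spk_dim=64):
        super().__init__()
        self.speaker = nn.Embedding(num_speakers, spk_dim)
        layer = nn.TransformerEncoderLayer(d_model=emb_dim + spk_dim,
                                           nhead=8, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=2)
        self.out = nn.Linear(emb_dim + spk_dim, emb_dim)

    def forward(self, emb, target_speaker):
        # emb: (batch, emb_dim) utterance embedding from a pre-trained encoder
        # target_speaker: (batch,) integer id of the identity to convert to
        spk = self.speaker(target_speaker)
        x = torch.cat([emb, spk], dim=-1).unsqueeze(1)   # length-1 sequence
        return self.out(self.encoder(x)).squeeze(1)      # anonymized embedding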
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris France, 2023, ISBN: 979-8-4007-0321-8.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{ahmed_asar_2023,
title = {ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {979-8-4007-0321-8},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11–15},
publisher = {ACM},
address = {Paris France},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris France, 2023, ISBN: 979-8-4007-0321-8.
Links | BibTeX | Tags: AI, UARC, Virtual Humans
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {979-8-4007-0321-8},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105–106},
publisher = {ACM},
address = {Paris France},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris France, 2023, ISBN: 979-8-4007-0055-2.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {979-8-4007-0055-2},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406–415},
publisher = {ACM},
address = {Paris France},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {03601323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: DTIC, MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1–2},
publisher = {ACM},
address = {Würzburg Germany},
keywords = {DTIC, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {26673053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chang, Di; Yin, Yufeng; Li, Zongjian; Tran, Minh; Soleymani, Mohammad
LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis Miscellaneous
2023, (arXiv:2308.10713 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@misc{chang_libreface_2023,
title = {LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis},
author = {Di Chang and Yufeng Yin and Zongjian Li and Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.10713},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {Facial expression analysis is an important tool for human-computer interaction. In this paper, we introduce LibreFace, an open-source toolkit for facial expression analysis. This open-source toolbox offers real-time and offline analysis of facial behavior through deep learning models, including facial action unit (AU) detection, AU intensity estimation, and facial expression recognition. To accomplish this, we employ several techniques, including the utilization of a large-scale pre-trained network, feature-wise knowledge distillation, and task-specific fine-tuning. These approaches are designed to effectively and accurately analyze facial expressions by leveraging visual information, thereby facilitating the implementation of real-time interactive applications. In terms of Action Unit (AU) intensity estimation, we achieve a Pearson Correlation Coefficient (PCC) of 0.63 on DISFA, which is 7% higher than the performance of OpenFace 2.0 while maintaining highly-efficient inference that runs two times faster than OpenFace 2.0. Despite being compact, our model also demonstrates competitive performance to state-of-the-art facial expression analysis methods on AffectNet, FFHQ, and RAF-DB. Our code will be released at https://github.com/ihp-lab/LibreFace},
note = {arXiv:2308.10713 [cs]},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yin, Yufeng; Chang, Di; Song, Guoxian; Sang, Shen; Zhi, Tiancheng; Liu, Jing; Luo, Linjie; Soleymani, Mohammad
FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features Miscellaneous
2023, (arXiv:2308.12380 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@misc{yin_fg-net_2023,
title = {FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features},
author = {Yufeng Yin and Di Chang and Guoxian Song and Shen Sang and Tiancheng Zhi and Jing Liu and Linjie Luo and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.12380},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {Automatic detection of facial Action Units (AUs) allows for objective facial expression analysis. Due to the high cost of AU labeling and the limited size of existing benchmarks, previous AU detection methods tend to overfit the dataset, resulting in a significant performance loss when evaluated across corpora. To address this problem, we propose FG-Net for generalizable facial action unit detection. Specifically, FG-Net extracts feature maps from a StyleGAN2 model pre-trained on a large and diverse face image dataset. Then, these features are used to detect AUs with a Pyramid CNN Interpreter, making the training efficient and capturing essential local features. The proposed FG-Net achieves a strong generalization ability for heatmap-based AU detection thanks to the generalizable and semantic-rich features extracted from the pre-trained generative model. Extensive experiments are conducted to evaluate within- and cross-corpus AU detection with the widely-used DISFA and BP4D datasets. Compared with the state-of-the-art, the proposed method achieves superior cross-domain performance while maintaining competitive within-domain performance. In addition, FG-Net is data-efficient and achieves competitive performance even when trained on 1000 samples. Our code will be released at https://github.com/ihp-lab/FG-Net},
note = {arXiv:2308.12380 [cs]},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
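The pyramid-interpreter idea above, loosely sketched in PyTorch: multi-scale feature maps (stand-ins here for StyleGAN2 activations) are fused into per-AU heatmaps whose peak value gives the detection score. All shapes, channel counts, and names are illustrative assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F

class PyramidInterpreter(nn.Module):
    def __init__(self, in_channels=(512, 256, 128), num_aus=12):
        super().__init__()
        # 1x1 convs project each pyramid level to a common channel width
        self.reduce = nn.ModuleList(nn.Conv2d(c, 64, 1) for c in in_channels)
        self.head = nn.Conv2d(64 * len(in_channels), num_aus, 3, padding=1)

    def forward(self, feats):                  # coarse-to-fine feature maps
        size = feats[-1].shape[-2:]            # upsample all to finest scale
        up = [F.interpolate(r(f), size=size, mode="bilinear", align_corners=False)
              for r, f in zip(self.reduce, feats)]
        heatmaps = self.head(torch.cat(up, dim=1))   # (B, num_aus, H, W)
        scores = heatmaps.flatten(2).amax(-1)        # peak activation per AU
        return heatmaps, torch.sigmoid(scores)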
Kappas, Arvid; Gratch, Jonathan
These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI Journal Article
In: Affec Sci, 2023, ISSN: 2662-2041, 2662-205X.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{kappas_these_2023,
title = {These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI},
author = {Arvid Kappas and Jonathan Gratch},
url = {https://link.springer.com/10.1007/s42761-023-00211-3},
doi = {10.1007/s42761-023-00211-3},
issn = {2662-2041, 2662-205X},
year = {2023},
date = {2023-08-01},
urldate = {2023-09-20},
journal = {Affec Sci},
abstract = {AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636–640},
publisher = {ISCA},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-08-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary, and as risk-aversion is more common in women and minorities, this translates into a "provably" fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718, Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-07-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718, Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv, Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{tak_is_2023,
title = {Is GPT a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv, Version Number: 1},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-20},
journal = {IEEE Trans. Affective Comput.},
pages = {1–14},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis Book Section
In: Kurosu, Masaaki; Hashizume, Ayako (Ed.): Human-Computer Interaction, vol. 14013, pp. 407–418, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35601-8 978-3-031-35602-5, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, Virtual Humans
@incollection{kurosu_relationship_2023,
title = {The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu and Ayako Hashizume},
url = {https://link.springer.com/10.1007/978-3-031-35602-5_29},
doi = {10.1007/978-3-031-35602-5_29},
isbn = {978-3-031-35601-8 978-3-031-35602-5},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-20},
booktitle = {Human-Computer Interaction},
volume = {14013},
pages = {407–418},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Gurney, Nikolos
The Design of Transparency Communication for Human-Multirobot Teams Book Section
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, vol. 14051, pp. 311–321, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35893-7 978-3-031-35894-4, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, DTIC, Virtual Humans
@incollection{degen_design_2023,
title = {The Design of Transparency Communication for Human-Multirobot Teams},
author = {Ning Wang and David V. Pynadath and Nikolos Gurney},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/10.1007/978-3-031-35894-4_23},
doi = {10.1007/978-3-031-35894-4_23},
isbn = {978-3-031-35893-7 978-3-031-35894-4},
year = {2023},
date = {2023-07-01},
urldate = {2023-08-24},
booktitle = {Artificial Intelligence in HCI},
volume = {14051},
pages = {311–321},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Rodrigues, Patrick B.; Singh, Rashmi; Oytun, Mert; Adami, Pooya; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale M.
A multidimensional taxonomy for human-robot interaction in construction Journal Article
In: Automation in Construction, vol. 150, pp. 104845, 2023, ISSN: 0926-5805.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{rodrigues_multidimensional_2023,
title = {A multidimensional taxonomy for human-robot interaction in construction},
author = {Patrick B. Rodrigues and Rashmi Singh and Mert Oytun and Pooya Adami and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale M. Lucas},
url = {https://www.sciencedirect.com/science/article/pii/S092658052300105X},
doi = {10.1016/j.autcon.2023.104845},
issn = {0926-5805},
year = {2023},
date = {2023-06-01},
urldate = {2023-03-31},
journal = {Automation in Construction},
volume = {150},
pages = {104845},
abstract = {Despite the increased interest in construction robotics both in academia and the industry, insufficient attention has been given to aspects related to Human-Robot Interaction (HRI). Characterizing HRI for construction tasks can help researchers organize knowledge in a structured manner that allows for classifying construction robotics applications and comparing and benchmarking different studies. This paper builds upon existing taxonomies and empirical studies in HRI in various industries (e.g., construction, manufacturing, and military, among others) to propose a multidimensional taxonomy to characterize HRI applications in the construction industry. The taxonomy design followed a systematic literature review in which common themes were identified and grouped into 16 categories. The proposed taxonomy can be used as a foundation for systematic reviews and meta-analyses of HRI applications in construction and can benefit the construction industry by informing the design of collaborative tasks performed by human-robot teams.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-06-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
volume = {1831},
pages = {530–535},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}