Publications
Search
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
@misc{tran_dyadic_2024,
  title = {Dyadic Interaction Modeling for Social Behavior Generation},
  author = {Tran, Minh and Chang, Di and Siniukov, Maksim and Soleymani, Mohammad},
  url = {http://arxiv.org/abs/2403.09069},
  year = {2024},
  date = {2024-03-01},
  urldate = {2024-03-19},
  publisher = {arXiv},
  eprint = {2403.09069},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work consider a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations, through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Kwon, Deuksin; Weiss, Emily; Kulshrestha, Tara; Chawla, Kushal; Lucas, Gale M.; Gratch, Jonathan
Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues Miscellaneous
2024, (arXiv:2402.13550 [cs]).
@misc{kwon_are_2024,
  title = {Are {LLMs} Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of {LLMs} in Negotiation Dialogues},
  author = {Kwon, Deuksin and Weiss, Emily and Kulshrestha, Tara and Chawla, Kushal and Lucas, Gale M. and Gratch, Jonathan},
  url = {http://arxiv.org/abs/2402.13550},
  year = {2024},
  date = {2024-02-01},
  urldate = {2024-03-14},
  publisher = {arXiv},
  eprint = {2402.13550},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {A successful negotiation demands a deep comprehension of the conversation context, Theory-of-Mind (ToM) skills to infer the partner's motives, as well as strategic reasoning and effective communication, making it challenging for automated systems. Given the remarkable performance of LLMs across a variety of NLP tasks, in this work, we aim to understand how LLMs can advance different aspects of negotiation research, ranging from designing dialogue systems to providing pedagogical feedback and scaling up data collection practices. To this end, we devise a methodology to analyze the multifaceted capabilities of LLMs across diverse dialogue scenarios covering all the time stages of a typical negotiation interaction. Our analysis adds to the increasing evidence for the superiority of GPT-4 across various tasks while also providing insights into specific tasks that remain difficult for LLMs. For instance, the models correlate poorly with human players when making subjective assessments about the negotiation dialogues and often struggle to generate responses that are contextually appropriate as well as strategically advantageous.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Murawski, Alaine; Ramirez‐Zohfeld, Vanessa; Mell, Johnathan; Tschoe, Marianne; Schierer, Allison; Olvera, Charles; Brett, Jeanne; Gratch, Jonathan; Lindquist, Lee A.
Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program Journal Article
In: J American Geriatrics Society, pp. jgs.18775, 2024, ISSN: 0002-8614, 1532-5415.
@article{murawski_development_2024,
  title = {Development and pilot testing of an artificial intelligence-based family caregiver negotiation program},
  author = {Murawski, Alaine and Ramirez-Zohfeld, Vanessa and Mell, Johnathan and Tschoe, Marianne and Schierer, Allison and Olvera, Charles and Brett, Jeanne and Gratch, Jonathan and Lindquist, Lee A.},
  url = {https://agsjournals.onlinelibrary.wiley.com/doi/10.1111/jgs.18775},
  doi = {10.1111/jgs.18775},
  issn = {0002-8614, 1532-5415},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-02-21},
  journal = {Journal of the American Geriatrics Society},
  pages = {jgs.18775},
  abstract = {Abstract
Background
Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial‐intelligence negotiation training program, NegotiAge, for family caregivers.
Methods
We convened negotiation experts, a geriatrician, a social worker, and community‐based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar‐based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open‐ended questions with thematic analysis).
Results
Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real‐time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (
n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreements were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend.
Conclusion
NegotiAge is an Artificial‐Intelligent Caregiver Negotiation Program, that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Background
Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial‐intelligence negotiation training program, NegotiAge, for family caregivers.
Methods
We convened negotiation experts, a geriatrician, a social worker, and community‐based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar‐based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open‐ended questions with thematic analysis).
Results
Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real‐time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (
n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreements were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend.
Conclusion
NegotiAge is an Artificial‐Intelligent Caregiver Negotiation Program, that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.
Shi, Zhonghao; O'Connell, Allison; Li, Zongjian; Liu, Siqi; Ayissi, Jennifer; Hoffman, Guy; Soleymani, Mohammad; Matarić, Maja J.
Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education Miscellaneous
2024, (arXiv:2402.01647 [cs]).
@misc{shi_build_2024,
  title = {Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging {AI} Education},
  author = {Shi, Zhonghao and O'Connell, Allison and Li, Zongjian and Liu, Siqi and Ayissi, Jennifer and Hoffman, Guy and Soleymani, Mohammad and Matarić, Maja J.},
  url = {http://arxiv.org/abs/2402.01647},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-02-21},
  publisher = {arXiv},
  eprint = {2402.01647},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {As artificial intelligence (AI) is playing an increasingly important role in our society and global economy, AI education and literacy have become necessary components in college and K-12 education to prepare students for an AI-powered society. However, current AI curricula have not yet been made accessible and engaging enough for students and schools from all socio-economic backgrounds with different educational goals. In this work, we developed an open-source learning module for college and high school students, which allows students to build their own robot companion from the ground up. This open platform can be used to provide hands-on experience and introductory knowledge about various aspects of AI, including robotics, machine learning (ML), software engineering, and mechanical engineering. Because of the social and personal nature of a socially assistive robot companion, this module also puts a special emphasis on human-centered AI, enabling students to develop a better understanding of human-AI interaction and AI ethics through hands-on learning activities. With open-source documentation, assembling manuals and affordable materials, students from different socio-economic backgrounds can personalize their learning experience based on their individual educational goals. To evaluate the student-perceived quality of our module, we conducted a usability testing workshop with 15 college students recruited from a minority-serving institution. Our results indicate that our AI module is effective, easy-to-follow, and engaging, and it increases student interest in studying AI/ML and robotics in the future. We hope that this work will contribute toward accessible and engaging AI education in human-AI interaction for college and high school students.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Jago, Arthur S.; Raveendhran, Roshni; Fast, Nathanael; Gratch, Jonathan
Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles Journal Article
In: Journal of Experimental Social Psychology, vol. 110, pp. 104553, 2024, ISSN: 00221031.
@article{jago_algorithmic_2024,
  title = {Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles},
  author = {Jago, Arthur S. and Raveendhran, Roshni and Fast, Nathanael and Gratch, Jonathan},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0022103123001105},
  doi = {10.1016/j.jesp.2023.104553},
  issn = {0022-1031},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-02-21},
  journal = {Journal of Experimental Social Psychology},
  volume = {110},
  pages = {104553},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Greene, Gretchen; Picard, Rosalind; Urquhart, Lachlan; Valstar, Michel
Guest Editorial: Ethics in Affective Computing Journal Article
In: IEEE Trans. Affective Comput., vol. 15, no. 1, pp. 1–3, 2024, ISSN: 1949-3045, 2371-9850.
@article{gratch_guest_2024,
  title = {Guest Editorial: Ethics in Affective Computing},
  author = {Gratch, Jonathan and Greene, Gretchen and Picard, Rosalind and Urquhart, Lachlan and Valstar, Michel},
  url = {https://ieeexplore.ieee.org/document/10454111/},
  doi = {10.1109/TAFFC.2023.3322918},
  issn = {1949-3045, 2371-9850},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-03-14},
  journal = {IEEE Transactions on Affective Computing},
  volume = {15},
  number = {1},
  pages = {1--3},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
@article{liu_gender_2023,
  title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
  author = {Liu, Ruying and Awada, Mohamad and Becerik-Gerber, Burcin and Lucas, Gale M. and Roll, Shawn C.},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
  doi = {10.1016/j.jenvp.2023.102135},
  issn = {0272-4944},
  year = {2023},
  date = {2023-11-01},
  urldate = {2023-09-20},
  journal = {Journal of Environmental Psychology},
  volume = {91},
  pages = {102135},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
@misc{chang_magicdance_2023,
  title = {{MagicDance}: Realistic Human Dance Video Generation with Motions \& Facial Expressions Transfer},
  author = {Chang, Di and Shi, Yichun and Gao, Quankai and Fu, Jessica and Xu, Hongyi and Song, Guoxian and Yan, Qing and Yang, Xiao and Soleymani, Mohammad},
  url = {http://arxiv.org/abs/2311.12052},
  year = {2023},
  date = {2023-11-01},
  urldate = {2023-12-07},
  publisher = {arXiv},
  eprint = {2311.12052},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
@misc{yang_context_2023,
  title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
  author = {Yang, Daniel and Kommineni, Aditya and Alshehri, Mohammad and Mohanty, Nilamadhab and Modi, Vedant and Gratch, Jonathan and Narayanan, Shrikanth},
  url = {http://arxiv.org/abs/2311.03551},
  year = {2023},
  date = {2023-11-01},
  urldate = {2023-12-07},
  publisher = {arXiv},
  eprint = {2311.03551},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
@misc{cho_can_2023,
  title = {Can Language Model Moderators Improve the Health of Online Discourse?},
  author = {Cho, Hyundong and Liu, Shuai and Shi, Taiwei and Jain, Darpan and Rizk, Basem and Huang, Yuyang and Lu, Zixun and Wen, Nuan and Gratch, Jonathan and Ferrara, Emilio and May, Jonathan},
  url = {http://arxiv.org/abs/2311.10781},
  year = {2023},
  date = {2023-11-01},
  urldate = {2023-12-07},
  publisher = {arXiv},
  eprint = {2311.10781},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to asses models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
@article{tak_framework_2023,
  title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart {HVAC} systems},
  author = {Tak, Ala Nekouvaght and Becerik-Gerber, Burçin and Soibelman, Lucio and Lucas, Gale},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
  doi = {10.1016/j.buildenv.2023.110935},
  issn = {0360-1323},
  year = {2023},
  date = {2023-11-01},
  urldate = {2023-12-07},
  journal = {Building and Environment},
  volume = {245},
  pages = {110935},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
@article{seyedrezaei_interaction_2023,
  title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in {North American Mediterranean} climate},
  author = {Seyedrezaei, Mirmahdi and Awada, Mohamad and Becerik-Gerber, Burcin and Lucas, Gale and Roll, Shawn},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
  doi = {10.1016/j.buildenv.2023.110743},
  issn = {0360-1323},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-09-20},
  journal = {Building and Environment},
  volume = {244},
  pages = {110743},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
@inproceedings{tran_multimodal_2023,
  title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
  author = {Tran, Trang and Yin, Yufeng and Tavabi, Leili and Delacruz, Joannalyn and Borsari, Brian and Woolley, Joshua D. and Scherer, Stefan and Soleymani, Mohammad},
  url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
  doi = {10.1145/3577190.3614105},
  isbn = {9798400700552},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages = {406--415},
  publisher = {ACM},
  address = {Paris, France},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{andrist_platform_2023,
  title = {Platform for Situated Intelligence and {OpenSense}: A Tutorial on Building Multimodal Interactive Applications for Research},
  author = {Andrist, Sean and Bohus, Dan and Li, Zongjian and Soleymani, Mohammad},
  url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
  doi = {10.1145/3610661.3617603},
  isbn = {9798400703218},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages = {105--106},
  publisher = {ACM},
  address = {Paris, France},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{ahmed_asar_2023,
  title = {{ASAR} Dataset and Computational Model for Affective State Recognition During {ARAT} Assessment for Upper Extremity Stroke Survivors},
  author = {Ahmed, Tamim and Rikakis, Thanassis and Kelliher, Aisling and Soleymani, Mohammad},
  url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
  doi = {10.1145/3610661.3617154},
  isbn = {9798400703218},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages = {11--15},
  publisher = {ACM},
  address = {Paris, France},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
@misc{tran_privacy-preserving_2023,
  title = {Privacy-preserving Representation Learning for Speech Understanding},
  author = {Tran, Minh and Soleymani, Mohammad},
  url = {http://arxiv.org/abs/2310.17194},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  publisher = {arXiv},
  eprint = {2310.17194},
  eprinttype = {arXiv},
  eprintclass = {eess},
  abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
@inproceedings{lin_toward_2023,
  title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
  author = {Lin, Eleanor and Hale, James and Gratch, Jonathan},
  url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
  doi = {10.1145/3565287.3617637},
  isbn = {978-1-4503-9926-5},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
  pages = {545--550},
  publisher = {ACM},
  address = {Washington, DC, USA},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
@techreport{prinzing_can_2023,
  author      = {Prinzing, Michael and Garton, Catherine and Berman, Catherine J. and Zhou, Jieni and West, Taylor Nicole and Gratch, Jonathan and Fredrickson, Barbara},
  title       = {Can AI Agents Help Humans to Connect?},
  institution = {PsyArXiv},
  year        = {2023},
  date        = {2023-10-01},
  doi         = {10.31234/osf.io/muq6s},
  url         = {https://osf.io/muq6s},
  urldate     = {2023-12-07},
  abstract    = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
@misc{chawla_be_2023,
  title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
  author = {Chawla, Kushal and Wu, Ian and Rong, Yu and Lucas, Gale M. and Gratch, Jonathan},
  url = {http://arxiv.org/abs/2310.14404},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  publisher = {arXiv},
  eprint = {2310.14404},
  eprinttype = {arXiv},
  eprintclass = {cs},
  abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
  keywords = {},
  pubstate = {published},
  tppubtype = {misc}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
@article{awada_predicting_2023,
  author   = {Awada, Mohamad and Becerik-Gerber, Burcin and Lucas, Gale and Roll, Shawn C.},
  title    = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
  journal  = {Sensors},
  volume   = {23},
  number   = {21},
  pages    = {8694},
  year     = {2023},
  date     = {2023-10-01},
  issn     = {1424-8220},
  doi      = {10.3390/s23218694},
  url      = {https://www.mdpi.com/1424-8220/23/21/8694},
  urldate  = {2023-12-07},
  abstract = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Filter
2024
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
Abstract | Links | BibTeX | Tags: Virtual Humans
@misc{tran_dyadic_2024,
title = {Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.09069},
eprint = {2403.09069},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-19},
publisher = {arXiv},
abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work consider a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations, through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
note = {arXiv:2403.09069 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Kwon, Deuksin; Weiss, Emily; Kulshrestha, Tara; Chawla, Kushal; Lucas, Gale M.; Gratch, Jonathan
Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues Miscellaneous
2024, (arXiv:2402.13550 [cs]).
Abstract | Links | BibTeX | Tags: AI, Virtual Humans
@misc{kwon_are_2024,
title = {Are {LLMs} Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of {LLMs} in Negotiation Dialogues},
author = {Deuksin Kwon and Emily Weiss and Tara Kulshrestha and Kushal Chawla and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2402.13550},
eprint = {2402.13550},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
publisher = {arXiv},
abstract = {A successful negotiation demands a deep comprehension of the conversation context, Theory-of-Mind (ToM) skills to infer the partner's motives, as well as strategic reasoning and effective communication, making it challenging for automated systems. Given the remarkable performance of LLMs across a variety of NLP tasks, in this work, we aim to understand how LLMs can advance different aspects of negotiation research, ranging from designing dialogue systems to providing pedagogical feedback and scaling up data collection practices. To this end, we devise a methodology to analyze the multifaceted capabilities of LLMs across diverse dialogue scenarios covering all the time stages of a typical negotiation interaction. Our analysis adds to the increasing evidence for the superiority of GPT-4 across various tasks while also providing insights into specific tasks that remain difficult for LLMs. For instance, the models correlate poorly with human players when making subjective assessments about the negotiation dialogues and often struggle to generate responses that are contextually appropriate as well as strategically advantageous.},
note = {arXiv:2402.13550 [cs]},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Murawski, Alaine; Ramirez‐Zohfeld, Vanessa; Mell, Johnathan; Tschoe, Marianne; Schierer, Allison; Olvera, Charles; Brett, Jeanne; Gratch, Jonathan; Lindquist, Lee A.
Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program Journal Article
In: J American Geriatrics Society, pp. jgs.18775, 2024, ISSN: 0002-8614, 1532-5415.
Abstract | Links | BibTeX | Tags: AI, Virtual Humans
@article{murawski_development_2024,
title = {Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program},
author = {Alaine Murawski and Vanessa Ramirez‐Zohfeld and Johnathan Mell and Marianne Tschoe and Allison Schierer and Charles Olvera and Jeanne Brett and Jonathan Gratch and Lee A. Lindquist},
url = {https://agsjournals.onlinelibrary.wiley.com/doi/10.1111/jgs.18775},
doi = {10.1111/jgs.18775},
issn = {0002-8614, 1532-5415},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of the American Geriatrics Society},
pages = {jgs.18775},
abstract = {Background
Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial‐intelligence negotiation training program, NegotiAge, for family caregivers.
Methods
We convened negotiation experts, a geriatrician, a social worker, and community‐based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar‐based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open‐ended questions with thematic analysis).
Results
Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real‐time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (
n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreements were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend.
Conclusion
NegotiAge is an Artificial‐Intelligent Caregiver Negotiation Program, that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Background
Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial‐intelligence negotiation training program, NegotiAge, for family caregivers.
Methods
We convened negotiation experts, a geriatrician, a social worker, and community‐based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar‐based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open‐ended questions with thematic analysis).
Results
Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real‐time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (
n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreements were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend.
Conclusion
NegotiAge is an Artificial‐Intelligent Caregiver Negotiation Program, that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.
Shi, Zhonghao; O'Connell, Allison; Li, Zongjian; Liu, Siqi; Ayissi, Jennifer; Hoffman, Guy; Soleymani, Mohammad; Matarić, Maja J.
Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education Miscellaneous
2024, (arXiv:2402.01647 [cs]).
Abstract | Links | BibTeX | Tags: Virtual Humans
@misc{shi_build_2024,
title = {Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging {AI} Education},
author = {Zhonghao Shi and Allison O'Connell and Zongjian Li and Siqi Liu and Jennifer Ayissi and Guy Hoffman and Mohammad Soleymani and Maja J. Matarić},
url = {http://arxiv.org/abs/2402.01647},
eprint = {2402.01647},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {As artificial intelligence (AI) is playing an increasingly important role in our society and global economy, AI education and literacy have become necessary components in college and K-12 education to prepare students for an AI-powered society. However, current AI curricula have not yet been made accessible and engaging enough for students and schools from all socio-economic backgrounds with different educational goals. In this work, we developed an open-source learning module for college and high school students, which allows students to build their own robot companion from the ground up. This open platform can be used to provide hands-on experience and introductory knowledge about various aspects of AI, including robotics, machine learning (ML), software engineering, and mechanical engineering. Because of the social and personal nature of a socially assistive robot companion, this module also puts a special emphasis on human-centered AI, enabling students to develop a better understanding of human-AI interaction and AI ethics through hands-on learning activities. With open-source documentation, assembling manuals and affordable materials, students from different socio-economic backgrounds can personalize their learning experience based on their individual educational goals. To evaluate the student-perceived quality of our module, we conducted a usability testing workshop with 15 college students recruited from a minority-serving institution. Our results indicate that our AI module is effective, easy-to-follow, and engaging, and it increases student interest in studying AI/ML and robotics in the future. We hope that this work will contribute toward accessible and engaging AI education in human-AI interaction for college and high school students.},
note = {arXiv:2402.01647 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Jago, Arthur S.; Raveendhran, Roshni; Fast, Nathanael; Gratch, Jonathan
Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles Journal Article
In: Journal of Experimental Social Psychology, vol. 110, pp. 104553, 2024, ISSN: 00221031.
Links | BibTeX | Tags: Virtual Humans
@article{jago_algorithmic_2024,
title = {Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles},
author = {Arthur S. Jago and Roshni Raveendhran and Nathanael Fast and Jonathan Gratch},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0022103123001105},
doi = {10.1016/j.jesp.2023.104553},
issn = {0022-1031},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of Experimental Social Psychology},
volume = {110},
pages = {104553},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Greene, Gretchen; Picard, Rosalind; Urquhart, Lachlan; Valstar, Michel
Guest Editorial: Ethics in Affective Computing Journal Article
In: IEEE Trans. Affective Comput., vol. 15, no. 1, pp. 1–3, 2024, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: Virtual Humans
@article{gratch_guest_2024,
title = {Guest Editorial: Ethics in Affective Computing},
author = {Jonathan Gratch and Gretchen Greene and Rosalind Picard and Lachlan Urquhart and Michel Valstar},
url = {https://ieeexplore.ieee.org/document/10454111/},
doi = {10.1109/TAFFC.2023.3322918},
issn = {1949-3045, 2371-9850},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-14},
journal = {IEEE Transactions on Affective Computing},
volume = {15},
number = {1},
pages = {1--3},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2023
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{chang_magicdance_2023,
title = {{MagicDance}: Realistic Human Dance Video Generation with Motions \& Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
eprint = {2311.12052},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
Abstract | Links | BibTeX | Tags: AI, UARC, Virtual Humans
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
eprint = {2311.03551},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
Abstract | Links | BibTeX | Tags: AI, Dialogue, UARC, Virtual Humans
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
eprint = {2311.10781},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to asses models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {AI, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart {HVAC} systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {0360-1323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in {North American Mediterranean} climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {9798400700552},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406--415},
publisher = {ACM},
address = {Paris, France},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: AI, UARC, Virtual Humans
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and {OpenSense}: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105--106},
publisher = {ACM},
address = {Paris, France},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ahmed_asar_2023,
title = {{ASAR} Dataset and Computational Model for Affective State Recognition During {ARAT} Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11--15},
publisher = {ACM},
address = {Paris, France},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
eprint = {2310.17194},
eprinttype = {arXiv},
eprintclass = {eess},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545--550},
publisher = {ACM},
address = {Washington, DC, USA},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
Abstract | Links | BibTeX | Tags: AI, UARC, Virtual Humans
@techreport{prinzing_can_2023,
title = {Can {AI} Agents Help Humans to Connect?},
author = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/muq6s},
doi = {10.31234/osf.io/muq6s},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
institution = {PsyArXiv},
type = {Preprint},
abstract = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, UARC, Virtual Humans
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
eprint = {2310.14404},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: Machine Learning, UARC, Virtual Humans
@article{awada_predicting_2023,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
  title     = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
  url       = {https://www.mdpi.com/1424-8220/23/21/8694},
  doi       = {10.3390/s23218694},
  issn      = {1424-8220},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  journal   = {Sensors},
  volume    = {23},
  number    = {21},
  pages     = {8694},
  abstract  = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
  keywords  = {Machine Learning, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@article{mozgai_machine_2023,
  author    = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
  title     = {Machine learning for semi-automated scoping reviews},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
  doi       = {10.1016/j.iswa.2023.200249},
  issn      = {26673053},
  year      = {2023},
  date      = {2023-09-01},
  urldate   = {2023-08-23},
  journal   = {Intelligent Systems with Applications},
  volume    = {19},
  pages     = {200249},
  keywords  = {UARC, VHTL, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg, Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1--2},
publisher = {ACM},
address = {Würzburg, Germany},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636--640},
publisher = {ISCA},
keywords = {Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kappas, Arvid; Gratch, Jonathan
These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI Journal Article
In: Affec Sci, 2023, ISSN: 2662-2041, 2662-205X.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{kappas_these_2023,
title = {These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI},
author = {Arvid Kappas and Jonathan Gratch},
url = {https://link.springer.com/10.1007/s42761-023-00211-3},
doi = {10.1007/s42761-023-00211-3},
issn = {2662-2041, 2662-205X},
year = {2023},
date = {2023-08-01},
urldate = {2023-09-20},
journal = {Affec Sci},
abstract = {AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.
Chang, Di; Yin, Yufeng; Li, Zongjian; Tran, Minh; Soleymani, Mohammad
LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis Miscellaneous
2023, (arXiv:2308.10713 [cs]).
Abstract | Links | BibTeX | Tags: Virtual Humans
@misc{chang_libreface_2023,
title = {LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis},
author = {Di Chang and Yufeng Yin and Zongjian Li and Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.10713},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
eprint = {2308.10713},
eprinttype = {arXiv},
eprintclass = {cs},
abstract = {Facial expression analysis is an important tool for human-computer interaction. In this paper, we introduce LibreFace, an open-source toolkit for facial expression analysis. This open-source toolbox offers real-time and offline analysis of facial behavior through deep learning models, including facial action unit (AU) detection, AU intensity estimation, and facial expression recognition. To accomplish this, we employ several techniques, including the utilization of a large-scale pre-trained network, feature-wise knowledge distillation, and task-specific fine-tuning. These approaches are designed to effectively and accurately analyze facial expressions by leveraging visual information, thereby facilitating the implementation of real-time interactive applications. In terms of Action Unit (AU) intensity estimation, we achieve a Pearson Correlation Coefficient (PCC) of 0.63 on DISFA, which is 7% higher than the performance of OpenFace 2.0 while maintaining highly-efficient inference that runs two times faster than OpenFace 2.0. Despite being compact, our model also demonstrates competitive performance to state-of-the-art facial expression analysis methods on AffecNet, FFHQ, and RAF-DB. Our code will be released at https://github.com/ihp-lab/LibreFace},
note = {arXiv:2308.10713 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yin, Yufeng; Chang, Di; Song, Guoxian; Sang, Shen; Zhi, Tiancheng; Liu, Jing; Luo, Linjie; Soleymani, Mohammad
FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features Miscellaneous
2023, (arXiv:2308.12380 [cs]).
Abstract | Links | BibTeX | Tags: Virtual Humans
@misc{yin_fg-net_2023,
title = {FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features},
author = {Yufeng Yin and Di Chang and Guoxian Song and Shen Sang and Tiancheng Zhi and Jing Liu and Linjie Luo and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.12380},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
eprint = {2308.12380},
eprinttype = {arXiv},
eprintclass = {cs},
abstract = {Automatic detection of facial Action Units (AUs) allows for objective facial expression analysis. Due to the high cost of AU labeling and the limited size of existing benchmarks, previous AU detection methods tend to overfit the dataset, resulting in a significant performance loss when evaluated across corpora. To address this problem, we propose FG-Net for generalizable facial action unit detection. Specifically, FG-Net extracts feature maps from a StyleGAN2 model pre-trained on a large and diverse face image dataset. Then, these features are used to detect AUs with a Pyramid CNN Interpreter, making the training efficient and capturing essential local features. The proposed FG-Net achieves a strong generalization ability for heatmap-based AU detection thanks to the generalizable and semantic-rich features extracted from the pre-trained generative model. Extensive experiments are conducted to evaluate within- and cross-corpus AU detection with the widely-used DISFA and BP4D datasets. Compared with the state-of-the-art, the proposed method achieves superior cross-domain performance while maintaining competitive within-domain performance. In addition, FG-Net is data-efficient and achieves competitive performance even when trained on 1000 samples. Our code will be released at https://github.com/ihp-lab/FG-Net},
note = {arXiv:2308.12380 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Rodrigues, Patrick B.; Singh, Rashmi; Oytun, Mert; Adami, Pooya; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale M.
A multidimensional taxonomy for human-robot interaction in construction Journal Article
In: Automation in Construction, vol. 150, pp. 104845, 2023, ISSN: 0926-5805.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{rodrigues_multidimensional_2023,
  author    = {Patrick B. Rodrigues and Rashmi Singh and Mert Oytun and Pooya Adami and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale M. Lucas},
  title     = {A multidimensional taxonomy for human-robot interaction in construction},
  url       = {https://www.sciencedirect.com/science/article/pii/S092658052300105X},
  doi       = {10.1016/j.autcon.2023.104845},
  issn      = {0926-5805},
  year      = {2023},
  date      = {2023-06-01},
  urldate   = {2023-03-31},
  journal   = {Automation in Construction},
  volume    = {150},
  pages     = {104845},
  abstract  = {Despite the increased interest in construction robotics both in academia and the industry, insufficient attention has been given to aspects related to Human-Robot Interaction (HRI). Characterizing HRI for construction tasks can help researchers organize knowledge in a structured manner that allows for classifying construction robotics applications and comparing and benchmarking different studies. This paper builds upon existing taxonomies and empirical studies in HRI in various industries (e.g., construction, manufacturing, and military, among others) to propose a multidimensional taxonomy to characterize HRI applications in the construction industry. The taxonomy design followed a systematic literature review in which common themes were identified and grouped into 16 categories. The proposed taxonomy can be used as a foundation for systematic reviews and meta-analyses of HRI applications in construction and can benefit the construction industry by informing the design of collaborative tasks performed by human-robot teams.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_speech_2023,
title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10095173/},
doi = {10.1109/ICASSP49357.2023.10095173},
isbn = {978-1-72816-327-7},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
publisher = {IEEE},
address = {Rhodes Island, Greece},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Leitner, Maxyn; Greenwald, Eric; Wang, Ning; Montgomery, Ryan; Merchant, Chirag
Designing Game-Based Learning for High School Artificial Intelligence Education Journal Article
In: Int J Artif Intell Educ, vol. 33, no. 2, pp. 384–398, 2023, ISSN: 1560-4292, 1560-4306.
Abstract | Links | BibTeX | Tags: AI, Virtual Humans
@article{leitner_designing_2023,
title = {Designing Game-Based Learning for High School Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ning Wang and Ryan Montgomery and Chirag Merchant},
url = {https://link.springer.com/10.1007/s40593-022-00327-w},
doi = {10.1007/s40593-022-00327-w},
issn = {1560-4292, 1560-4306},
year = {2023},
date = {2023-06-01},
urldate = {2023-09-20},
journal = {Int J Artif Intell Educ},
volume = {33},
number = {2},
pages = {384--398},
abstract = {Artificial Intelligence (AI) permeates every aspect of our daily lives and is no longer a subject reserved for a select few in higher education but is essential knowledge that our youth need for the future. Much is unknown about the level of AI knowledge that is age and developmentally appropriate for high school, let alone about how to teach AI to even younger learners. In this theoretical paper, we discuss the design of a game-based learning environment for high school AI education, drawing upon insights gained from a prior cognitive interview study at a STEM focused private high school. We argue that game-based learning is an excellent fit for AI education due to the commonality of problem solving in both game playing and AI.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Artificial Intelligence (AI) permeates every aspect of our daily lives and is no longer a subject reserved for a select few in higher education but is essential knowledge that our youth need for the future. Much is unknown about the level of AI knowledge that is age and developmentally appropriate for high school, let alone about how to teach AI to even younger learners. In this theoretical paper, we discuss the design of a game-based learning environment for high school AI education, drawing upon insights gained from a prior cognitive interview study at a STEM focused private high school. We argue that game-based learning is an excellent fit for AI education due to the commonality of problem solving in both game playing and AI.
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@article{aris_learning_2023,
  author    = {Timothy Aris and Volkan Ustun and Rajay Kumar},
  title     = {Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning},
  url       = {https://journals.flvc.org/FLAIRS/article/view/133348},
  doi       = {10.32473/flairs.36.133348},
  issn      = {2334-0762},
  year      = {2023},
  date      = {2023-05-01},
  urldate   = {2023-08-04},
  journal   = {FLAIRS},
  volume    = {36},
  abstract  = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. This work expands on previous work on raycast-based agents by increasing the number of enemies from one to three. We demonstrate an automated way of generating training and testing data within geo-specific terrains. We show that replacing the action space with a more abstracted, navmesh-based waypoint movement system can increase the generality and success rate of the models while providing similar results to our previous paper's results regarding retraining across terrains. We also comprehensively evaluate the differences between these and the previous models. Finally, we show that incorporating pixels into the model's input can increase performance at the cost of longer training times.},
  keywords  = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Murawski, Alaine; Ramirez-Zohfeld, Vanessa; Schierer, Allison; Olvera, Charles; Mell, Johnathan; Gratch, Jonathan; Brett, Jeanne; Lindquist, Lee A.
Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers Journal Article
In: Geriatrics, vol. 8, no. 2, pp. 36, 2023, ISSN: 2308-3417, (Number: 2 Publisher: Multidisciplinary Digital Publishing Institute).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{murawski_transforming_2023,
  author    = {Alaine Murawski and Vanessa Ramirez-Zohfeld and Allison Schierer and Charles Olvera and Johnathan Mell and Jonathan Gratch and Jeanne Brett and Lee A. Lindquist},
  title     = {Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers},
  url       = {https://www.mdpi.com/2308-3417/8/2/36},
  doi       = {10.3390/geriatrics8020036},
  issn      = {2308-3417},
  year      = {2023},
  date      = {2023-04-01},
  urldate   = {2023-03-31},
  journal   = {Geriatrics},
  volume    = {8},
  number    = {2},
  pages     = {36},
  abstract  = {Background: Family caregivers of older people with Alzheimer’s dementia (PWD) often need to advocate and resolve health-related conflicts (e.g., determining treatment necessity, billing errors, and home health extensions). As they deal with these health system conflicts, family caregivers experience unnecessary frustration, anxiety, and stress. The goal of this research was to apply a negotiation framework to resolve real-world family caregiver–older adult conflicts. Methods: We convened an interdisciplinary team of national community-based family caregivers, social workers, geriatricians, and negotiation experts (n = 9; Illinois, Florida, New York, and California) to examine the applicability of negotiation and conflict management frameworks to three older adult–caregiver conflicts (i.e., caregiver–older adult, caregiver–provider, and caregiver–caregiver). The panel of caregivers provided scenarios and dialogue describing conflicts they experienced in these three settings. A qualitative analysis was then performed grouping the responses into a framework matrix. Results: Upon presenting the three conflicts to the caregivers, 96 responses (caregiver–senior), 75 responses (caregiver–caregiver), and 80 responses (caregiver–provider) were generated. A thematic analysis showed that the statements and responses fit the interest–rights–power (IRP) negotiation framework. Discussion: The interests–rights–power (IRP) framework, used in business negotiations, provided insight into how caregivers experienced conflict with older adults, providers, and other caregivers. Future research is needed to examine applying the IRP framework in the training of caregivers of older people with Alzheimer’s dementia.},
  note      = {Number: 2
Publisher: Multidisciplinary Digital Publishing Institute},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan
The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective Journal Article
In: Philosophical Transactions of the Royal Society B: Biological Sciences, vol. 378, no. 1875, pp. 20210475, 2023, (Publisher: Royal Society).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{gratch_promise_2023,
  author    = {Jonathan Gratch},
  title     = {The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective},
  url       = {https://royalsocietypublishing.org/doi/abs/10.1098/rstb.2021.0475},
  doi       = {10.1098/rstb.2021.0475},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  journal   = {Philosophical Transactions of the Royal Society B: Biological Sciences},
  volume    = {378},
  number    = {1875},
  pages     = {20210475},
  abstract  = {In face-to-face interactions, parties rapidly react and adapt to each other's words, movements and expressions. Any science of face-to-face interaction must develop approaches to hypothesize and rigorously test mechanisms that explain such interdependent behaviour. Yet conventional experimental designs often sacrifice interactivity to establish experimental control. Interactive virtual and robotic agents have been offered as a way to study true interactivity while enforcing a measure of experimental control by allowing participants to interact with realistic but carefully controlled partners. But as researchers increasingly turn to machine learning to add realism to such agents, they may unintentionally distort the very interactivity they seek to illuminate, particularly when investigating the role of non-verbal signals such as emotion or active-listening behaviours. Here I discuss some of the methodological challenges that may arise when machine learning is used to model the behaviour of interaction partners. By articulating and explicitly considering these commitments, researchers can transform ‘unintentional distortions’ into valuable methodological tools that yield new insights and better contextualize existing experimental findings that rely on learning technology.
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.},
  note      = {Publisher: Royal Society},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.
Awada, Mohamad; Becerik-Gerber, Burcin; Liu, Ruying; Seyedrezaei, Mirmahdi; Lu, Zheng; Xenakis, Matheos; Lucas, Gale; Roll, Shawn C.; Narayanan, Shrikanth
Ten questions concerning the impact of environmental stress on office workers Journal Article
In: Building and Environment, vol. 229, pp. 109964, 2023, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{awada_ten_2023,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Ruying Liu and Mirmahdi Seyedrezaei and Zheng Lu and Matheos Xenakis and Gale Lucas and Shawn C. Roll and Shrikanth Narayanan},
  title     = {Ten questions concerning the impact of environmental stress on office workers},
  url       = {https://www.sciencedirect.com/science/article/pii/S0360132322011945},
  doi       = {10.1016/j.buildenv.2022.109964},
  issn      = {0360-1323},
  year      = {2023},
  date      = {2023-02-01},
  urldate   = {2023-03-31},
  journal   = {Building and Environment},
  volume    = {229},
  pages     = {109964},
  abstract  = {We regularly face stress during our everyday activities, to the extent that stress is recognized by the World Health Organization as the epidemic of the 21st century. Stress is how humans respond physically and psychologically to adjustments, experiences, conditions, and circumstances in their lives. While there are many reasons for stress, work and job pressure remain the main cause. Thus, companies are increasingly interested in creating healthier, more comfortable, and stress-free offices for their workers. The indoor environment can induce environmental stress when it cannot satisfy the individual needs for health and comfort. In fact, office environmental conditions (e.g., thermal, and indoor air conditions, lighting, and noise) and interior design parameters (e.g., office layout, colors, furniture, access to views, distance to window, personal control and biophilic design) have been found to affect office workers' stress levels. A line of research based on the stress recovery theory offers new insights for establishing offices that limit environmental stress and help with work stress recovery. To that end, this paper answers ten questions that explore the relation between the indoor office-built environment and stress levels among workers. The answers to the ten questions are based on an extensive literature review to draw conclusions from what has been achieved to date. Thus, this study presents a foundation for future environmental stress related research in offices.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Adami, Pooya; Singh, Rashmi; Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students Journal Article
In: Advanced Engineering Informatics, vol. 55, pp. 101837, 2023, ISSN: 1474-0346.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{adami_participants_2023,
  author    = {Pooya Adami and Rashmi Singh and Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
  title     = {Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students},
  url       = {https://www.sciencedirect.com/science/article/pii/S1474034622002956},
  doi       = {10.1016/j.aei.2022.101837},
  issn      = {1474-0346},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  journal   = {Advanced Engineering Informatics},
  volume    = {55},
  pages     = {101837},
  abstract  = {Virtual Reality (VR)-based training has gained attention from the scientific community in the Architecture, Engineering, and Construction (AEC) industry as a cost-effective and safe method that eliminates the safety risks that may impose on workers during the training compared to traditional training methods (e.g., in-person hands-on training, apprenticeship). Although researchers have developed VR-based training for construction workers, some have recruited students rather than workers to understand the effect of their VR-based training. However, students are different from construction workers in many ways, which can threaten the validity of such studies. Hence, research is needed to investigate the extent to which the findings of a VR-based training study are contingent on whether students or construction workers were used as the study sample. This paper strives to compare the effectiveness of VR-based training on university students’ and construction workers’ knowledge acquisition, trust in the robot, and robot operation self-efficacy in remote operation of a construction robot. Twenty-five construction workers and twenty-five graduate construction engineering students were recruited to complete a VR-based training for remote operating a demolition robot. We used quantitative analyses to answer our research questions. Our study shows that the results are dependent on the target sample in that students gained more knowledge, whereas construction workers gained more trust in the robot and more self-efficacy in robot operation. These findings suggest that the effectiveness of VR-based training on students may not necessarily associate with its effectiveness on construction workers.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lucas_customizing_2023,
title = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
author = {Gale M. Lucas and Johnathan Mell and Jill Boberg and Forrest Zenone and Ewart J. Visser and Chad Tossell and Todd Seech},
url = {https://www.nature.com/articles/s41598-022-27154-2},
doi = {10.1038/s41598-022-27154-2},
issn = {2045-2322},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {78},
abstract = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lei, Su; Gratch, Jonathan
Emotional Expressivity is a Reliable Signal of Surprise Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lei_emotional_2023,
title = {Emotional Expressivity is a Reliable Signal of Surprise},
author = {Su Lei and Jonathan Gratch},
doi = {10.1109/TAFFC.2023.3234015},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {We consider the problem of inferring what happened to a person in a social task from momentary facial reactions. To approach this, we introduce several innovations. First, rather than predicting what (observers think) someone feels, we predict objective features of the event that immediately preceded the facial reactions. Second, we draw on appraisal theory, a key psychological theory of emotion, to characterize features of this immediately-preceded event. Specifically, we explore if facial expressions reveal if the event is expected, goal-congruent, and norm-compatible. Finally, we argue that emotional expressivity serves as a better feature for characterizing momentary expressions than traditional facial features. Specifically, we use supervised machine learning to predict third-party judgments of emotional expressivity with high accuracy, and show this model improves inferences about the nature of the event that preceded an emotional reaction. Contrary to common sense, “genuine smiles” failed to predict if an event advanced a person's goals. Rather, expressions best revealed if an event violated expectations. We discussed the implications of these findings for the interpretation of facial displays and potential limitations that could impact the generality of these findings.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale M.; Gratch, Jonathan
Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chawla_towards_2023,
title = {Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale M. Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/abstract/document/10021626},
doi = {10.1109/TAFFC.2023.3238007},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {Negotiation is a complex social interaction that encapsulates emotional encounters in human decision-making. Virtual agents that can negotiate with humans by the means of language are useful in pedagogy and conversational AI. To advance the development of such agents, we explore the role of emotion in the prediction of two important subjective goals in a negotiation – outcome satisfaction and partner perception. We devise ways to measure and compare different degrees of emotion expression in negotiation dialogues, consisting of emoticon, lexical, and contextual variables. Through an extensive analysis of a large-scale dataset in chat-based negotiations, we find that incorporating emotion expression explains significantly more variance, above and beyond the demographics and personality traits of the participants. Further, our temporal analysis reveals that emotive information from both early and later stages of the negotiation contributes to this prediction, indicating the need for a continual learning model of capturing emotion for automated agents. Finally, we extend our analysis to another dataset, showing promise that our findings generalize to more complex scenarios. We conclude by discussing our insights, which will be helpful for designing adaptive negotiation agents that interact through realistic communication interfaces.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration \& Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718
Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1--6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Gurney, Nikolos
The Design of Transparency Communication for Human-Multirobot Teams Book Section
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, vol. 14051, pp. 311–321, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35893-7 978-3-031-35894-4, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, Virtual Humans
@incollection{degen_design_2023,
title = {The Design of Transparency Communication for Human-Multirobot Teams},
author = {Ning Wang and David V. Pynadath and Nikolos Gurney},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/10.1007/978-3-031-35894-4_23},
doi = {10.1007/978-3-031-35894-4_23},
isbn = {978-3-031-35893-7 978-3-031-35894-4},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {Artificial Intelligence in HCI},
volume = {14051},
pages = {311--321},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the {CareerFair.ai} Platform at an {American Hispanic-Serving} Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
volume = {13916},
pages = {189--201},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{georgila_considerations_2023,
title = {Considerations for Child Speech Synthesis for Dialogue Systems},
author = {Kallirroi Georgila},
url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
year = {2023},
date = {2023-01-01},
address = {Los Angeles, CA},
abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
internal-note = {NOTE(review): @inproceedings requires a booktitle (proceedings title) but none is present — confirm the venue from the linked PDF and add it},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
eprint = {2303.12822},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso M. De; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {De Melo, Celso M. and Gratch, Jonathan and Marsella, Stacy and Pelachaud, Catherine},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
journal = {Proceedings of the IEEE},
pages = {1--16},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
volume = {1831},
pages = {530--535},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
journal = {IEEE Transactions on Affective Computing},
pages = {1--14},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{tak_is_2023,
title = {Is {GPT} a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
eprint = {2307.13779},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis Book Section
In: Kurosu, Masaaki; Hashizume, Ayako (Ed.): Human-Computer Interaction, vol. 14013, pp. 407–418, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35601-8 978-3-031-35602-5, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, Virtual Humans
@incollection{kurosu_relationship_2023,
title = {The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu and Ayako Hashizume},
url = {https://link.springer.com/10.1007/978-3-031-35602-5_29},
doi = {10.1007/978-3-031-35602-5_29},
isbn = {978-3-031-35601-8 978-3-031-35602-5},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Human-Computer Interaction},
volume = {14013},
pages = {407--418},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
2022
Difede, JoAnn; Rothbaum, Barbara O.; Rizzo, Albert A.; Wyka, Katarzyna; Spielman, Lisa; Reist, Christopher; Roy, Michael J.; Jovanovic, Tanja; Norrholm, Seth D.; Cukor, Judith; Olden, Megan; Glatt, Charles E.; Lee, Francis S.
In: Transl Psychiatry, vol. 12, no. 1, pp. 299, 2022, ISSN: 2158-3188.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, Virtual Humans
@article{difede_enhancing_2022,
title = {Enhancing exposure therapy for posttraumatic stress disorder ({PTSD}): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer},
author = {JoAnn Difede and Barbara O. Rothbaum and Albert A. Rizzo and Katarzyna Wyka and Lisa Spielman and Christopher Reist and Michael J. Roy and Tanja Jovanovic and Seth D. Norrholm and Judith Cukor and Megan Olden and Charles E. Glatt and Francis S. Lee},
url = {https://www.nature.com/articles/s41398-022-02066-x},
doi = {10.1038/s41398-022-02066-x},
issn = {2158-3188},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-13},
journal = {Transl Psychiatry},
volume = {12},
number = {1},
pages = {299},
abstract = {Posttraumatic stress disorder (PTSD) is a significant public health issue. Yet, there are limited treatment options and no data to suggest which treatment will work for whom. We tested the efficacy of virtual reality exposure (VRE) or prolonged imaginal exposure (PE), augmented with D-cycloserine (DCS) for combat-related PTSD. As an exploratory aim, we examined whether brain-derived neurotrophic factor (BDNF) and fatty acid amide hydrolase (FAAH) moderated treatment response. Military personnel with PTSD ( n = 192) were recruited into a multisite double-blind randomized controlled trial to receive nine weeks of VRE or PE, with DCS or placebo. Primary outcome was the improvement in symptom severity. Randomization was stratified by comorbid depression (MDD) and site. Participants in both VRE and PE showed similar meaningful clinical improvement with no difference between the treatment groups. A significant interaction ( p = 0.45) suggested VRE was more effective for depressed participants (CAPS difference M = 3.51 [95\% CI 1.17–5.86]},
keywords = {DTIC, MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}