Publications
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu HI USA, 2024, ISBN: 9798400703317.
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1–8},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
@misc{tran_dyadic_2024,
title = {Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.09069},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-19},
publisher = {arXiv},
abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work consider a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations, through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
note = {arXiv:2403.09069 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
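The abstract above notes that DIM discretizes listener and speaker motion through VQ-VAE before fine-tuning for generation. As a rough illustration of that quantization step only, here is a minimal sketch; all module names, sizes, and hyperparameters are assumptions, not the authors' code.

```python
# Minimal sketch of the VQ-VAE-style quantization step the DIM framework
# reportedly uses to discretize listener/speaker motion features. All names
# and sizes are illustrative, not taken from the authors' implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F

class VectorQuantizer(nn.Module):
    def __init__(self, num_codes=512, dim=128, beta=0.25):
        super().__init__()
        self.codebook = nn.Embedding(num_codes, dim)  # learnable code vectors
        self.beta = beta

    def forward(self, z):                      # z: (batch, time, dim) motion features
        flat = z.reshape(-1, z.shape[-1])
        dist = torch.cdist(flat, self.codebook.weight)  # distance to every code
        idx = dist.argmin(dim=-1)                       # nearest-code assignment
        q = self.codebook(idx).view_as(z)
        # codebook + commitment losses (standard VQ-VAE objective)
        loss = F.mse_loss(q, z.detach()) + self.beta * F.mse_loss(z, q.detach())
        q = z + (q - z).detach()               # straight-through gradient estimator
        return q, idx, loss
```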
Lu, Liupei; Yin, Yufeng; Gu, Yuming; Wu, Yizhen; Prasad, Pratusha; Zhao, Yajie; Soleymani, Mohammad
Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection Miscellaneous
2024, (arXiv:2403.10737 [cs]).
@misc{lu_leveraging_2024,
title = {Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection},
author = {Liupei Lu and Yufeng Yin and Yuming Gu and Yizhen Wu and Pratusha Prasad and Yajie Zhao and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.10737},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
publisher = {arXiv},
abstract = {Facial action unit (AU) detection is a fundamental block for objective facial expression analysis. Supervised learning approaches require a large amount of manual labeling which is costly. The limited labeled data are also not diverse in terms of gender which can affect model fairness. In this paper, we propose to use synthetically generated data and multi-source domain adaptation (MSDA) to address the problems of the scarcity of labeled data and the diversity of subjects. Specifically, we propose to generate a diverse dataset through synthetic facial expression re-targeting by transferring the expressions from real faces to synthetic avatars. Then, we use MSDA to transfer the AU detection knowledge from a real dataset and the synthetic dataset to a target dataset. Instead of aligning the overall distributions of different domains, we propose Paired Moment Matching (PM2) to align the features of the paired real and synthetic data with the same facial expression. To further improve gender fairness, PM2 matches the features of the real data with a female and a male synthetic image. Our results indicate that synthetic data and the proposed model improve both AU detection performance and fairness across genders, demonstrating its potential to solve AU detection in-the-wild.},
note = {arXiv:2403.10737 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
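The PM2 idea described above (matching real-image features against paired female and male synthetic re-targets) can be illustrated with a first-moment version; the actual moments, distance, and weighting used in the paper may differ, so the function below is a hypothetical sketch.

```python
# Hedged sketch of a Paired Moment Matching (PM2)-style loss as described in
# the abstract: align features of real images with their paired female and
# male synthetic re-targets. Only first moments (means) are matched here.
import torch

def pm2_loss(f_real, f_syn_female, f_syn_male):
    """Each tensor: (batch, feat_dim); rows are paired by shared expression."""
    mu_real = f_real.mean(dim=0)
    loss_f = torch.norm(mu_real - f_syn_female.mean(dim=0), p=2)
    loss_m = torch.norm(mu_real - f_syn_male.mean(dim=0), p=2)
    return loss_f + loss_m  # encourages gender-balanced feature alignment
```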
Kwon, Deuksin; Weiss, Emily; Kulshrestha, Tara; Chawla, Kushal; Lucas, Gale M.; Gratch, Jonathan
Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues Miscellaneous
2024, (arXiv:2402.13550 [cs]).
@misc{kwon_are_2024,
title = {Are LLMs Effective Negotiators? Systematic Evaluation of the Multifaceted Capabilities of LLMs in Negotiation Dialogues},
author = {Deuksin Kwon and Emily Weiss and Tara Kulshrestha and Kushal Chawla and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2402.13550},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
publisher = {arXiv},
abstract = {A successful negotiation demands a deep comprehension of the conversation context, Theory-of-Mind (ToM) skills to infer the partner's motives, as well as strategic reasoning and effective communication, making it challenging for automated systems. Given the remarkable performance of LLMs across a variety of NLP tasks, in this work, we aim to understand how LLMs can advance different aspects of negotiation research, ranging from designing dialogue systems to providing pedagogical feedback and scaling up data collection practices. To this end, we devise a methodology to analyze the multifaceted capabilities of LLMs across diverse dialogue scenarios covering all the time stages of a typical negotiation interaction. Our analysis adds to the increasing evidence for the superiority of GPT-4 across various tasks while also providing insights into specific tasks that remain difficult for LLMs. For instance, the models correlate poorly with human players when making subjective assessments about the negotiation dialogues and often struggle to generate responses that are contextually appropriate as well as strategically advantageous.},
note = {arXiv:2402.13550 [cs]},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Murawski, Alaine; Ramirez‐Zohfeld, Vanessa; Mell, Johnathan; Tschoe, Marianne; Schierer, Allison; Olvera, Charles; Brett, Jeanne; Gratch, Jonathan; Lindquist, Lee A.
Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program Journal Article
In: Journal of the American Geriatrics Society, pp. jgs.18775, 2024, ISSN: 0002-8614, 1532-5415.
@article{murawski_development_2024,
title = {Development and pilot testing of an artificial intelligence‐based family caregiver negotiation program},
author = {Alaine Murawski and Vanessa Ramirez‐Zohfeld and Johnathan Mell and Marianne Tschoe and Allison Schierer and Charles Olvera and Jeanne Brett and Jonathan Gratch and Lee A. Lindquist},
url = {https://agsjournals.onlinelibrary.wiley.com/doi/10.1111/jgs.18775},
doi = {10.1111/jgs.18775},
issn = {0002-8614, 1532-5415},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of the American Geriatrics Society},
pages = {jgs.18775},
abstract = {Background: Family caregivers of people with Alzheimer's disease experience conflicts as they navigate health care but lack training to resolve these disputes. We sought to develop and pilot test an artificial‐intelligence negotiation training program, NegotiAge, for family caregivers. Methods: We convened negotiation experts, a geriatrician, a social worker, and community‐based family caregivers. Content matter experts created short videos to teach negotiation skills. Caregivers generated dialogue surrounding conflicts. Computer scientists utilized the dialogue with the Interactive Arbitration Guide Online (IAGO) platform to develop avatar‐based agents (e.g., sibling, older adult, physician) for caregivers to practice negotiating. Pilot testing was conducted with family caregivers to assess usability (USE) and satisfaction (open‐ended questions with thematic analysis). Results: Development: With NegotiAge, caregivers progress through didactic material, then receive scenarios to negotiate (e.g., physician recommends gastric tube, sibling disagrees with home support, older adult refusing support). Caregivers negotiate in real‐time with avatars who are designed to act like humans, including emotional tactics and irrational behaviors. Caregivers send/receive offers, using tactics until either mutual agreement or time expires. Immediate feedback is generated for the user to improve skills training. Pilot testing: Family caregivers (n = 12) completed the program and survey. USE questionnaire (Likert scale 1–7) subset scores revealed: (1) Useful—Mean 5.69 (SD 0.76); (2) Ease—Mean 5.24 (SD 0.96); (3) Learn—Mean 5.69 (SD 0.74); (4) Satisfy—Mean 5.62 (SD 1.10). Items that received over 80% agreement were: It helps me be more effective; It helps me be more productive; It is useful; It gives me more control over the activities in my life; It makes the things I want to accomplish easier to get done. Participants were highly satisfied and found NegotiAge fun to use (91.7%), with 100% who would recommend it to a friend. Conclusion: NegotiAge is an artificial‐intelligence caregiver negotiation program that is usable and feasible for family caregivers to become familiar with negotiating conflicts commonly seen in health care.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Greene, Gretchen; Picard, Rosalind; Urquhart, Lachlan; Valstar, Michel
Guest Editorial: Ethics in Affective Computing Journal Article
In: IEEE Transactions on Affective Computing, vol. 15, no. 1, pp. 1–3, 2024, ISSN: 1949-3045, 2371-9850.
@article{gratch_guest_2024,
title = {Guest Editorial: Ethics in Affective Computing},
author = {Jonathan Gratch and Gretchen Greene and Rosalind Picard and Lachlan Urquhart and Michel Valstar},
url = {https://ieeexplore.ieee.org/document/10454111/},
doi = {10.1109/TAFFC.2023.3322918},
issn = {1949-3045, 2371-9850},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-14},
journal = {IEEE Transactions on Affective Computing},
volume = {15},
number = {1},
pages = {1–3},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Shi, Zhonghao; O'Connell, Allison; Li, Zongjian; Liu, Siqi; Ayissi, Jennifer; Hoffman, Guy; Soleymani, Mohammad; Matarić, Maja J.
Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education Miscellaneous
2024, (arXiv:2402.01647 [cs]).
@misc{shi_build_2024,
title = {Build Your Own Robot Friend: An Open-Source Learning Module for Accessible and Engaging AI Education},
author = {Zhonghao Shi and Allison O'Connell and Zongjian Li and Siqi Liu and Jennifer Ayissi and Guy Hoffman and Mohammad Soleymani and Maja J. Matarić},
url = {http://arxiv.org/abs/2402.01647},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {As artificial intelligence (AI) is playing an increasingly important role in our society and global economy, AI education and literacy have become necessary components in college and K-12 education to prepare students for an AI-powered society. However, current AI curricula have not yet been made accessible and engaging enough for students and schools from all socio-economic backgrounds with different educational goals. In this work, we developed an open-source learning module for college and high school students, which allows students to build their own robot companion from the ground up. This open platform can be used to provide hands-on experience and introductory knowledge about various aspects of AI, including robotics, machine learning (ML), software engineering, and mechanical engineering. Because of the social and personal nature of a socially assistive robot companion, this module also puts a special emphasis on human-centered AI, enabling students to develop a better understanding of human-AI interaction and AI ethics through hands-on learning activities. With open-source documentation, assembling manuals and affordable materials, students from different socio-economic backgrounds can personalize their learning experience based on their individual educational goals. To evaluate the student-perceived quality of our module, we conducted a usability testing workshop with 15 college students recruited from a minority-serving institution. Our results indicate that our AI module is effective, easy-to-follow, and engaging, and it increases student interest in studying AI/ML and robotics in the future. We hope that this work will contribute toward accessible and engaging AI education in human-AI interaction for college and high school students.},
note = {arXiv:2402.01647 [cs]},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Jago, Arthur S.; Raveendhran, Roshni; Fast, Nathanael; Gratch, Jonathan
Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles Journal Article
In: Journal of Experimental Social Psychology, vol. 110, pp. 104553, 2024, ISSN: 00221031.
@article{jago_algorithmic_2024,
title = {Algorithmic management diminishes status: An unintended consequence of using machines to perform social roles},
author = {Arthur S. Jago and Roshni Raveendhran and Nathanael Fast and Jonathan Gratch},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0022103123001105},
doi = {10.1016/j.jesp.2023.104553},
issn = {00221031},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {Journal of Experimental Social Psychology},
volume = {110},
pages = {104553},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling & simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration & Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rodrigues, Patrick B.; Becerik-Gerber, Burcin; Soibelman, Lucio; Lucas, Gale M.; Roll, Shawn C.
Virtual Environment for Studying the Effects of Operational and Environmental Sounds on Teleoperated Demolition Proceedings Article
In: Computing in Civil Engineering 2023, pp. 54–61, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8523-1.
@inproceedings{rodrigues_virtual_2024,
title = {Virtual Environment for Studying the Effects of Operational and Environmental Sounds on Teleoperated Demolition},
author = {Patrick B. Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Gale M. Lucas and Shawn C. Roll},
url = {https://ascelibrary.org/doi/10.1061/9780784485231.007},
doi = {10.1061/9780784485231.007},
isbn = {978-0-7844-8523-1},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {54–61},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ruying; Becerik-Gerber, Burçin; Lucas, Gale M.; Busta, Kelly
Development of a VR Training Platform for Active Shooter Incident Preparedness in Healthcare Environments via a Stakeholder-Engaged Process Proceedings Article
In: Computing in Civil Engineering 2023, pp. 45–53, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8523-1.
@inproceedings{liu_development_2024,
title = {Development of a VR Training Platform for Active Shooter Incident Preparedness in Healthcare Environments via a Stakeholder-Engaged Process},
author = {Ruying Liu and Burçin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://ascelibrary.org/doi/10.1061/9780784485231.006},
doi = {10.1061/9780784485231.006},
isbn = {978-0-7844-8523-1},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {45–53},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Seyedrezaei, Mirmahdi; Becerik-Gerber, Burcin; Lucas, Gale
Investigating the Interplay between Indoor Environmental Quality and Workers’ Health and Productivity: Preliminary Results Proceedings Article
In: Computing in Civil Engineering 2023, pp. 614–622, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8524-8.
@inproceedings{awada_investigating_2024,
title = {Investigating the Interplay between Indoor Environmental Quality and Workers’ Health and Productivity: Preliminary Results},
author = {Mohamad Awada and Mirmahdi Seyedrezaei and Burcin Becerik-Gerber and Gale Lucas},
url = {https://ascelibrary.org/doi/10.1061/9780784485248.074},
doi = {10.1061/9780784485248.074},
isbn = {978-0-7844-8524-8},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {614–622},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Hao; Chang, Di; Li, Fang; Soleymani, Mohammad; Ahuja, Narendra
MagicPose4D: Crafting Articulated Models with Appearance and Motion Control Miscellaneous
2024, (Version Number: 1).
@misc{zhang_magicpose4d_2024,
title = {MagicPose4D: Crafting Articulated Models with Appearance and Motion Control},
author = {Hao Zhang and Di Chang and Fang Li and Mohammad Soleymani and Narendra Ahuja},
url = {https://arxiv.org/abs/2405.14017},
doi = {10.48550/ARXIV.2405.14017},
year = {2024},
date = {2024-01-01},
urldate = {2024-06-25},
publisher = {arXiv},
abstract = {With the success of 2D and 3D visual generative models, there is growing interest in generating 4D content. Existing methods primarily rely on text prompts to produce 4D content, but they often fall short of accurately defining complex or rare motions. To address this limitation, we propose MagicPose4D, a novel framework for refined control over both appearance and motion in 4D generation. Unlike traditional methods, MagicPose4D accepts monocular videos as motion prompts, enabling precise and customizable motion generation. MagicPose4D comprises two key modules: (i) the Dual-Phase 4D Reconstruction Module, which operates in two phases. The first phase focuses on capturing the model's shape using accurate 2D supervision and less accurate but geometrically informative 3D pseudo-supervision, without imposing skeleton constraints. The second phase refines the model using the more accurate pseudo-3D supervision obtained in the first phase and introduces kinematic chain-based skeleton constraints to ensure physical plausibility. Additionally, we propose a Global-local Chamfer loss that aligns the overall distribution of predicted mesh vertices with the supervision while maintaining part-level alignment without extra annotations. (ii) The Cross-category Motion Transfer Module leverages the predictions from the 4D reconstruction module and uses a kinematic-chain-based skeleton to achieve cross-category motion transfer. It ensures smooth transitions between frames through dynamic rigidity, facilitating robust generalization without additional training. Through extensive experiments, we demonstrate that MagicPose4D significantly improves the accuracy and consistency of 4D content generation, outperforming existing methods in various benchmarks.},
note = {Version Number: 1},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
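For readers wanting a concrete handle on the Global-local Chamfer loss mentioned in this abstract, the following is a hedged sketch: a standard symmetric Chamfer term over all vertices plus per-part terms. The part groupings, weights, and exact formulation are assumptions, not the paper's definition.

```python
# Illustrative sketch of a "global-local" Chamfer objective in the spirit of
# the abstract: one Chamfer term over all mesh vertices plus per-part terms.
# Part index sets and the weight w_local are hypothetical.
import torch

def chamfer(a, b):
    """Symmetric Chamfer distance between point sets a: (N, 3), b: (M, 3)."""
    d = torch.cdist(a, b)                       # (N, M) pairwise distances
    return d.min(dim=1).values.mean() + d.min(dim=0).values.mean()

def global_local_chamfer(pred, target, parts, w_local=1.0):
    """parts: list of (pred_idx, target_idx) index tensors, one pair per part."""
    loss = chamfer(pred, target)                # global distribution alignment
    for pi, ti in parts:
        loss = loss + w_local * chamfer(pred[pi], target[ti])  # part-level term
    return loss
```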
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {03601323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier to aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to assess models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
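The auditing method above hinges on prompting an LLM to synthesize plausible context for a bare utterance before re-checking its emotion label. A minimal sketch of that step follows; the prompt wording and the commented-out client call are illustrative assumptions, not the paper's actual prompts.

```python
# Hedged sketch of the context-synthesis idea in the abstract: ask an LLM to
# generate surrounding context for an utterance, then audit its emotion label
# against the enriched input. Prompt text and client call are illustrative.
def build_context_prompt(utterance: str) -> str:
    return (
        "The following text was written in some unstated situation.\n"
        f"Text: \"{utterance}\"\n"
        "Write one or two sentences of plausible context (speaker, setting, "
        "preceding events) that would explain why this was said."
    )

# Example usage (any chat-completion client could stand in here):
# context = llm(build_context_prompt("I can't believe you did that."))
# audited_input = context + " " + utterance  # re-annotate with enriched input
```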
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
@misc{chang_magicdance_2023,
title = {MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {02724944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
@article{awada_predicting_2023,
title = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://www.mdpi.com/1424-8220/23/21/8694},
doi = {10.3390/s23218694},
issn = {1424-8220},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
journal = {Sensors},
volume = {23},
number = {21},
pages = {8694},
abstract = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
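The extended model described in this abstract first predicts psychological states and then feeds those predictions into the productivity regressor. A compact sketch with XGBoost (the paper's top performer) follows; the feature layout and synthetic data are placeholders, not the study's pipeline.

```python
# Sketch of the two-stage "extended model" from the abstract: predict
# psychological states from sensor features, then append those predictions
# when regressing productivity. Data shapes and names are placeholders.
import numpy as np
import xgboost as xgb

X = np.random.rand(200, 12)            # physiological + behavioral features
psych = np.random.rand(200, 4)         # stress, eustress, distress, mood
y = np.random.rand(200) * 100          # perceived productivity score

# Stage 1: one regressor per psychological state
state_models = [xgb.XGBRegressor(n_estimators=200).fit(X, psych[:, k])
                for k in range(psych.shape[1])]
psych_hat = np.column_stack([m.predict(X) for m in state_models])

# Stage 2: productivity from raw features plus predicted states
extended = xgb.XGBRegressor(n_estimators=200).fit(np.hstack([X, psych_hat]), y)
```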
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
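The "selfish, but wisely" agent described above maximizes its own points while avoiding walkaways. One way to express that as reward shaping for self-play RL is sketched below; the penalty magnitude is a hypothetical choice, not a value from the paper.

```python
# Toy sketch of the reward shaping implied by the abstract: a "selfish" agent
# maximizes its own negotiation points but is penalized when the partner
# walks away with no deal. The penalty value and scaling are hypothetical.
def selfish_but_wise_reward(own_points: float, deal_reached: bool,
                            walkaway_penalty: float = 10.0) -> float:
    if not deal_reached:
        return -walkaway_penalty   # failed negotiations are costly
    return own_points              # otherwise, purely self-interested
```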
year = {2024},
date = {2024-01-01},
urldate = {2024-06-25},
publisher = {arXiv},
abstract = {With the success of 2D and 3D visual generative models, there is growing interest in generating 4D content. Existing methods primarily rely on text prompts to produce 4D content, but they often fall short of accurately defining complex or rare motions. To address this limitation, we propose MagicPose4D, a novel framework for refined control over both appearance and motion in 4D generation. Unlike traditional methods, MagicPose4D accepts monocular videos as motion prompts, enabling precise and customizable motion generation. MagicPose4D comprises two key modules:
i) Dual-Phase 4D Reconstruction Module, which operates in two phases. The first phase focuses on capturing the model's shape using accurate 2D supervision and less accurate but geometrically informative 3D pseudo-supervision, without imposing skeleton constraints. The second phase refines the model using the more accurate pseudo-3D supervision obtained in the first phase and introduces kinematic chain-based skeleton constraints to ensure physical plausibility. Additionally, we propose a Global-local Chamfer loss that aligns the overall distribution of predicted mesh vertices with the supervision while maintaining part-level alignment without extra annotations.
ii) Cross-category Motion Transfer Module, which leverages the predictions from the 4D reconstruction module and uses a kinematic-chain-based skeleton to achieve cross-category motion transfer. It ensures smooth transitions between frames through dynamic rigidity, facilitating robust generalization without additional training.
Through extensive experiments, we demonstrate that MagicPose4D significantly improves the accuracy and consistency of 4D content generation, outperforming existing methods in various benchmarks.},
note = {Version Number: 1},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
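As a reading aid for the Global-local Chamfer loss named in the abstract above, here is a minimal numerical sketch of such an objective: a symmetric Chamfer term over all vertices plus averaged per-part terms. It is not the authors' code; the function names are invented, and the explicit part labels stand in for the annotation-free part assignments (e.g., derived from the kinematic chain) used in the paper.

    import numpy as np

    def chamfer(a: np.ndarray, b: np.ndarray) -> float:
        """Symmetric Chamfer distance between point sets a (N,3) and b (M,3)."""
        d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)  # (N, M) pairwise
        return d.min(axis=1).mean() + d.min(axis=0).mean()

    def global_local_chamfer(pred, target, pred_parts, tgt_parts, w_local=0.5):
        """Global Chamfer over all vertices plus mean Chamfer over matching parts."""
        loss = chamfer(pred, target)
        parts = np.intersect1d(pred_parts, tgt_parts)
        if parts.size == 0:
            return loss
        local = [chamfer(pred[pred_parts == p], target[tgt_parts == p]) for p in parts]
        return loss + w_local * float(np.mean(local))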
2023
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {03601323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
Abstract | Links | BibTeX | Tags: AI, Dialogue, DTIC, UARC, Virtual Humans
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier to aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to assess models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {AI, Dialogue, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
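To make the auditing strategy above concrete, the sketch below asks a language model to synthesize a plausible context for a bare utterance and then re-checks whether the original emotion label still fits. The prompts and the call_llm callable are placeholders invented for this illustration, not the paper's prompting templates.

    # Hypothetical sketch of context-based label auditing; `call_llm` is any
    # text-in/text-out completion function supplied by the caller.
    def audit_example(text: str, label: str, call_llm) -> dict:
        context = call_llm(
            f'Write a one-sentence situational context in which someone might say: "{text}"'
        )
        enriched = f"Context: {context}\nUtterance: {text}"
        verdict = call_llm(
            f"{enriched}\nDoes the emotion label '{label}' fit the utterance in this "
            f"context? Answer yes or no; if no, suggest a better label."
        )
        return {"context": context, "enriched_input": enriched, "verdict": verdict}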
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{chang_magicdance_2023,
title = {MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {02724944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC, Virtual Humans
@article{awada_predicting_2023,
title = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://www.mdpi.com/1424-8220/23/21/8694},
doi = {10.3390/s23218694},
issn = {1424-8220},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
journal = {Sensors},
volume = {23},
number = {21},
pages = {8694},
abstract = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
keywords = {DTIC, Machine Learning, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
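The extended model described above feeds predicted psychological states into the productivity regressor as additional features. The sketch below shows one plausible wiring of that two-stage idea with XGBoost; the variable names, default hyperparameters, and stage layout are assumptions rather than the authors' pipeline, and in practice the stage-1 predictions should come from held-out folds to avoid leakage.

    import numpy as np
    from xgboost import XGBClassifier, XGBRegressor

    def fit_extended_model(X_phys, y_mood, y_productivity):
        # Stage 1: predict a psychological state (here: mood class) from
        # physiological/behavioral features.
        mood_model = XGBClassifier().fit(X_phys, y_mood)
        mood_proba = mood_model.predict_proba(X_phys)       # soft state predictions
        # Stage 2: augment the raw features with the predicted state and
        # regress perceived productivity on the extended feature set.
        X_ext = np.hstack([X_phys, mood_proba])
        prod_model = XGBRegressor().fit(X_ext, y_productivity)
        return mood_model, prod_model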
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, DTIC, UARC, Virtual Humans
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {Dialogue, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
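One way to read the paper's "selfish, but wisely" result is as reward shaping in self-play RL: the agent maximizes its own points while treating walkaways as costly. The toy reward functions below illustrate that contrast; the penalty magnitude and the fairness weighting are invented for illustration and are not the paper's reward definitions.

    def selfish_but_wise_reward(own_points: float, deal_reached: bool,
                                walkaway_penalty: float = 10.0) -> float:
        # Maximize own outcome, but a failed negotiation hurts the agent too,
        # which implicitly teaches it to leave value on the table for the partner.
        return own_points if deal_reached else -walkaway_penalty

    def prosocial_reward(own_points: float, partner_points: float,
                         deal_reached: bool, w: float = 0.5) -> float:
        # A contrasting variant that explicitly values the partner's outcome.
        return (1 - w) * own_points + w * partner_points if deal_reached else 0.0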
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
Abstract | Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@techreport{prinzing_can_2023,
title = {Can AI Agents Help Humans to Connect?},
author = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/muq6s},
doi = {10.31234/osf.io/muq6s},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
institution = {PsyArXiv},
abstract = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545–550},
publisher = {ACM},
address = {Washington DC USA},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
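As a concrete picture of the anonymization recipe above, the self-contained sketch below maps an utterance-level embedding plus a target-speaker code to the "same utterance, different speaker" representation. The architecture, dimensions, and class name are assumptions for illustration, not the authors' released model.

    import torch
    import torch.nn as nn

    class IdentityConverter(nn.Module):
        """Convert an utterance embedding to a chosen target speaker's identity."""
        def __init__(self, dim=768, n_speakers=100, n_layers=2, n_heads=8):
            super().__init__()
            self.speaker_emb = nn.Embedding(n_speakers, dim)
            layer = nn.TransformerEncoderLayer(d_model=dim, nhead=n_heads,
                                               batch_first=True)
            self.encoder = nn.TransformerEncoder(layer, num_layers=n_layers)
            self.out = nn.Linear(dim, dim)

        def forward(self, utt_emb, target_speaker):
            spk = self.speaker_emb(target_speaker)      # (B, dim) identity code
            seq = torch.stack([utt_emb, spk], dim=1)    # two-token sequence
            return self.out(self.encoder(seq)[:, 0])    # converted embedding

At inference time, sampling a random target speaker yields a privacy-preserving embedding that can still feed downstream classifiers (emotion, depression, intent, and so on).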
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{ahmed_asar_2023,
title = {ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11–15},
publisher = {ACM},
address = {Paris France},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: AI, UARC, Virtual Humans
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105–106},
publisher = {ACM},
address = {Paris France},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {9798400700552},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406–415},
publisher = {ACM},
address = {Paris France},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {03601323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: DTIC, MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1–2},
publisher = {ACM},
address = {Würzburg Germany},
keywords = {DTIC, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {26673053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chang, Di; Yin, Yufeng; Li, Zongjian; Tran, Minh; Soleymani, Mohammad
LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis Miscellaneous
2023, (arXiv:2308.10713 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@misc{chang_libreface_2023,
title = {LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis},
author = {Di Chang and Yufeng Yin and Zongjian Li and Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.10713},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {Facial expression analysis is an important tool for human-computer interaction. In this paper, we introduce LibreFace, an open-source toolkit for facial expression analysis. This open-source toolbox offers real-time and offline analysis of facial behavior through deep learning models, including facial action unit (AU) detection, AU intensity estimation, and facial expression recognition. To accomplish this, we employ several techniques, including the utilization of a large-scale pre-trained network, feature-wise knowledge distillation, and task-specific fine-tuning. These approaches are designed to effectively and accurately analyze facial expressions by leveraging visual information, thereby facilitating the implementation of real-time interactive applications. In terms of Action Unit (AU) intensity estimation, we achieve a Pearson Correlation Coefficient (PCC) of 0.63 on DISFA, which is 7% higher than the performance of OpenFace 2.0 while maintaining highly-efficient inference that runs two times faster than OpenFace 2.0. Despite being compact, our model also demonstrates competitive performance to state-of-the-art facial expression analysis methods on AffectNet, FFHQ, and RAF-DB. Our code will be released at https://github.com/ihp-lab/LibreFace},
note = {arXiv:2308.10713 [cs]},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
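The abstract above names feature-wise knowledge distillation as one of LibreFace's compression techniques. The loss below is a generic sketch of that idea, combining a task loss with feature matching and logit distillation; the weights and tensor shapes are assumptions, not the toolkit's implementation.

    import torch.nn.functional as F

    def distillation_loss(student_feat, teacher_feat,
                          student_logits, teacher_logits,
                          labels, alpha=0.5, beta=0.5):
        # Match intermediate features of the compact student to the large teacher.
        feat_loss = F.mse_loss(student_feat, teacher_feat.detach())
        # Soften and match the output distributions.
        kd_loss = F.kl_div(F.log_softmax(student_logits, dim=-1),
                           F.softmax(teacher_logits.detach(), dim=-1),
                           reduction="batchmean")
        # Standard supervised objective on the labeled data.
        task_loss = F.cross_entropy(student_logits, labels)
        return task_loss + alpha * feat_loss + beta * kd_loss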
Yin, Yufeng; Chang, Di; Song, Guoxian; Sang, Shen; Zhi, Tiancheng; Liu, Jing; Luo, Linjie; Soleymani, Mohammad
FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features Miscellaneous
2023, (arXiv:2308.12380 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@misc{yin_fg-net_2023,
title = {FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features},
author = {Yufeng Yin and Di Chang and Guoxian Song and Shen Sang and Tiancheng Zhi and Jing Liu and Linjie Luo and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.12380},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {Automatic detection of facial Action Units (AUs) allows for objective facial expression analysis. Due to the high cost of AU labeling and the limited size of existing benchmarks, previous AU detection methods tend to overfit the dataset, resulting in a significant performance loss when evaluated across corpora. To address this problem, we propose FG-Net for generalizable facial action unit detection. Specifically, FG-Net extracts feature maps from a StyleGAN2 model pre-trained on a large and diverse face image dataset. Then, these features are used to detect AUs with a Pyramid CNN Interpreter, making the training efficient and capturing essential local features. The proposed FG-Net achieves a strong generalization ability for heatmap-based AU detection thanks to the generalizable and semantic-rich features extracted from the pre-trained generative model. Extensive experiments are conducted to evaluate within- and cross-corpus AU detection with the widely-used DISFA and BP4D datasets. Compared with the state-of-the-art, the proposed method achieves superior cross-domain performance while maintaining competitive within-domain performance. In addition, FG-Net is data-efficient and achieves competitive performance even when trained on 1000 samples. Our code will be released at https://github.com/ihp-lab/FG-Net},
note = {arXiv:2308.12380 [cs]},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
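As a rough sketch of the "Pyramid CNN Interpreter" idea named above, the module below fuses multi-scale feature maps, such as those taken from a pre-trained generative model's intermediate layers, into per-AU heatmaps. Channel counts, layer choices, and the class name are illustrative assumptions, not the released architecture.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class PyramidAUInterpreter(nn.Module):
        def __init__(self, in_channels=(512, 256, 128), n_aus=12, width=64):
            super().__init__()
            self.reduce = nn.ModuleList(
                [nn.Conv2d(c, width, kernel_size=1) for c in in_channels])
            self.head = nn.Conv2d(width * len(in_channels), n_aus, 3, padding=1)

        def forward(self, feats):
            # feats: list of (B, C_i, H_i, W_i) maps, coarse to fine.
            size = feats[-1].shape[-2:]
            fused = torch.cat(
                [F.interpolate(r(f), size=size, mode="bilinear",
                               align_corners=False)
                 for r, f in zip(self.reduce, feats)], dim=1)
            return self.head(fused)     # (B, n_aus, H, W) AU heatmaps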
Kappas, Arvid; Gratch, Jonathan
These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI Journal Article
In: Affective Science, 2023, ISSN: 2662-2041, 2662-205X.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{kappas_these_2023,
title = {These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI},
author = {Arvid Kappas and Jonathan Gratch},
url = {https://link.springer.com/10.1007/s42761-023-00211-3},
doi = {10.1007/s42761-023-00211-3},
issn = {2662-2041, 2662-205X},
year = {2023},
date = {2023-08-01},
urldate = {2023-09-20},
journal = {Affective Science},
abstract = {AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636–640},
publisher = {ISCA},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rodrigues, Patrick B.; Singh, Rashmi; Oytun, Mert; Adami, Pooya; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale M.
A multidimensional taxonomy for human-robot interaction in construction Journal Article
In: Automation in Construction, vol. 150, pp. 104845, 2023, ISSN: 0926-5805.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{rodrigues_multidimensional_2023,
title = {A multidimensional taxonomy for human-robot interaction in construction},
author = {Patrick B. Rodrigues and Rashmi Singh and Mert Oytun and Pooya Adami and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale M. Lucas},
url = {https://www.sciencedirect.com/science/article/pii/S092658052300105X},
doi = {10.1016/j.autcon.2023.104845},
issn = {0926-5805},
year = {2023},
date = {2023-06-01},
urldate = {2023-03-31},
journal = {Automation in Construction},
volume = {150},
pages = {104845},
abstract = {Despite the increased interest in construction robotics both in academia and the industry, insufficient attention has been given to aspects related to Human-Robot Interaction (HRI). Characterizing HRI for construction tasks can help researchers organize knowledge in a structured manner that allows for classifying construction robotics applications and comparing and benchmarking different studies. This paper builds upon existing taxonomies and empirical studies in HRI in various industries (e.g., construction, manufacturing, and military, among others) to propose a multidimensional taxonomy to characterize HRI applications in the construction industry. The taxonomy design followed a systematic literature review in which common themes were identified and grouped into 16 categories. The proposed taxonomy can be used as a foundation for systematic reviews and meta-analyses of HRI applications in construction and can benefit the construction industry by informing the design of collaborative tasks performed by human-robot teams.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Leitner, Maxyn; Greenwald, Eric; Wang, Ning; Montgomery, Ryan; Merchant, Chirag
Designing Game-Based Learning for High School Artificial Intelligence Education Journal Article
In: International Journal of Artificial Intelligence in Education, vol. 33, no. 2, pp. 384–398, 2023, ISSN: 1560-4292, 1560-4306.
Abstract | Links | BibTeX | Tags: AI, Virtual Humans
@article{leitner_designing_2023,
title = {Designing Game-Based Learning for High School Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ning Wang and Ryan Montgomery and Chirag Merchant},
url = {https://link.springer.com/10.1007/s40593-022-00327-w},
doi = {10.1007/s40593-022-00327-w},
issn = {1560-4292, 1560-4306},
year = {2023},
date = {2023-06-01},
urldate = {2023-09-20},
journal = {International Journal of Artificial Intelligence in Education},
volume = {33},
number = {2},
pages = {384–398},
abstract = {Artificial Intelligence (AI) permeates every aspect of our daily lives and is no longer a subject reserved for a select few in higher education but is essential knowledge that our youth need for the future. Much is unknown about the level of AI knowledge that is age and developmentally appropriate for high school, let alone about how to teach AI to even younger learners. In this theoretical paper, we discuss the design of a game-based learning environment for high school AI education, drawing upon insights gained from a prior cognitive interview study at a STEM focused private high school. We argue that game-based learning is an excellent fit for AI education due to the commonality of problem solving in both game playing and AI.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{tran_speech_2023,
title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10095173/},
doi = {10.1109/ICASSP49357.2023.10095173},
isbn = {978-1-72816-327-7},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1–5},
publisher = {IEEE},
address = {Rhodes Island, Greece},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning Journal Article
In: The International FLAIRS Conference Proceedings, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, DTIC, UARC, Virtual Humans
@article{aris_learning_2023,
title = {Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning},
author = {Timothy Aris and Volkan Ustun and Rajay Kumar},
url = {https://journals.flvc.org/FLAIRS/article/view/133348},
doi = {10.32473/flairs.36.133348},
issn = {2334-0762},
year = {2023},
date = {2023-05-01},
urldate = {2023-08-04},
journal = {The International FLAIRS Conference Proceedings},
volume = {36},
abstract = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. This work expands on previous work on raycast-based agents by increasing the number of enemies from one to three. We demonstrate an automated way of generating training and testing data within geo-specific terrains. We show that replacing the action space with a more abstracted, navmesh-based waypoint movement system can increase the generality and success rate of the models while providing similar results to our previous paper's results regarding retraining across terrains. We also comprehensively evaluate the differences between these and the previous models. Finally, we show that incorporating pixels into the model's input can increase performance at the cost of longer training times.},
keywords = {CogArch, Cognitive Architecture, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
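The abstract above swaps low-level movement for navmesh-based waypoint actions. The toy environment below shows what such an abstracted action space looks like; the class, reward proxy, and flat arrays are invented for illustration (the actual work uses Unity ML-Agents with raycasts on geo-specific terrain).

    import numpy as np

    class WaypointCoverEnv:
        """Agent picks one of K navmesh waypoints instead of low-level moves."""
        def __init__(self, waypoints, enemy_positions):
            self.waypoints = np.asarray(waypoints)       # (K, 3) navmesh points
            self.enemies = np.asarray(enemy_positions)   # (3, 3) three enemies

        def step(self, action: int):
            pos = self.waypoints[action]                 # teleport-style abstraction
            # Crude cover proxy: reward distance to the nearest enemy; a real
            # implementation would raycast against terrain to test line of sight.
            reward = float(np.linalg.norm(self.enemies - pos, axis=1).min())
            return pos, reward, False, {}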
Murawski, Alaine; Ramirez-Zohfeld, Vanessa; Schierer, Allison; Olvera, Charles; Mell, Johnathan; Gratch, Jonathan; Brett, Jeanne; Lindquist, Lee A.
Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers Journal Article
In: Geriatrics, vol. 8, no. 2, pp. 36, 2023, ISSN: 2308-3417, (Number: 2 Publisher: Multidisciplinary Digital Publishing Institute).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{murawski_transforming_2023,
title = {Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers},
author = {Alaine Murawski and Vanessa Ramirez-Zohfeld and Allison Schierer and Charles Olvera and Johnathan Mell and Jonathan Gratch and Jeanne Brett and Lee A. Lindquist},
url = {https://www.mdpi.com/2308-3417/8/2/36},
doi = {10.3390/geriatrics8020036},
issn = {2308-3417},
year = {2023},
date = {2023-04-01},
urldate = {2023-03-31},
journal = {Geriatrics},
volume = {8},
number = {2},
pages = {36},
abstract = {Background: Family caregivers of older people with Alzheimer’s dementia (PWD) often need to advocate and resolve health-related conflicts (e.g., determining treatment necessity, billing errors, and home health extensions). As they deal with these health system conflicts, family caregivers experience unnecessary frustration, anxiety, and stress. The goal of this research was to apply a negotiation framework to resolve real-world family caregiver–older adult conflicts. Methods: We convened an interdisciplinary team of national community-based family caregivers, social workers, geriatricians, and negotiation experts (n = 9; Illinois, Florida, New York, and California) to examine the applicability of negotiation and conflict management frameworks to three older adult–caregiver conflicts (i.e., caregiver–older adult, caregiver–provider, and caregiver–caregiver). The panel of caregivers provided scenarios and dialogue describing conflicts they experienced in these three settings. A qualitative analysis was then performed grouping the responses into a framework matrix. Results: Upon presenting the three conflicts to the caregivers, 96 responses (caregiver–senior), 75 responses (caregiver–caregiver), and 80 responses (caregiver–provider) were generated. A thematic analysis showed that the statements and responses fit the interest–rights–power (IRP) negotiation framework. Discussion: The interests–rights–power (IRP) framework, used in business negotiations, provided insight into how caregivers experienced conflict with older adults, providers, and other caregivers. Future research is needed to examine applying the IRP framework in the training of caregivers of older people with Alzheimer’s dementia.},
note = {Number: 2
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective Journal Article
In: Philosophical Transactions of the Royal Society B: Biological Sciences, vol. 378, no. 1875, pp. 20210475, 2023, (Publisher: Royal Society).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{gratch_promise_2023,
title = {The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective},
author = {Jonathan Gratch},
url = {https://royalsocietypublishing.org/doi/abs/10.1098/rstb.2021.0475},
doi = {10.1098/rstb.2021.0475},
year = {2023},
date = {2023-03-01},
urldate = {2023-03-31},
journal = {Philosophical Transactions of the Royal Society B: Biological Sciences},
volume = {378},
number = {1875},
pages = {20210475},
abstract = {In face-to-face interactions, parties rapidly react and adapt to each other's words, movements and expressions. Any science of face-to-face interaction must develop approaches to hypothesize and rigorously test mechanisms that explain such interdependent behaviour. Yet conventional experimental designs often sacrifice interactivity to establish experimental control. Interactive virtual and robotic agents have been offered as a way to study true interactivity while enforcing a measure of experimental control by allowing participants to interact with realistic but carefully controlled partners. But as researchers increasingly turn to machine learning to add realism to such agents, they may unintentionally distort the very interactivity they seek to illuminate, particularly when investigating the role of non-verbal signals such as emotion or active-listening behaviours. Here I discuss some of the methodological challenges that may arise when machine learning is used to model the behaviour of interaction partners. By articulating and explicitly considering these commitments, researchers can transform ‘unintentional distortions’ into valuable methodological tools that yield new insights and better contextualize existing experimental findings that rely on learning technology.
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.},
note = {Publisher: Royal Society},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Liu, Ruying; Seyedrezaei, Mirmahdi; Lu, Zheng; Xenakis, Matheos; Lucas, Gale; Roll, Shawn C.; Narayanan, Shrikanth
Ten questions concerning the impact of environmental stress on office workers Journal Article
In: Building and Environment, vol. 229, pp. 109964, 2023, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{awada_ten_2023,
title = {Ten questions concerning the impact of environmental stress on office workers},
author = {Mohamad Awada and Burcin Becerik-Gerber and Ruying Liu and Mirmahdi Seyedrezaei and Zheng Lu and Matheos Xenakis and Gale Lucas and Shawn C. Roll and Shrikanth Narayanan},
url = {https://www.sciencedirect.com/science/article/pii/S0360132322011945},
doi = {10.1016/j.buildenv.2022.109964},
issn = {0360-1323},
year = {2023},
date = {2023-02-01},
urldate = {2023-03-31},
journal = {Building and Environment},
volume = {229},
pages = {109964},
abstract = {We regularly face stress during our everyday activities, to the extent that stress is recognized by the World Health Organization as the epidemic of the 21st century. Stress is how humans respond physically and psychologically to adjustments, experiences, conditions, and circumstances in their lives. While there are many reasons for stress, work and job pressure remain the main cause. Thus, companies are increasingly interested in creating healthier, more comfortable, and stress-free offices for their workers. The indoor environment can induce environmental stress when it cannot satisfy the individual needs for health and comfort. In fact, office environmental conditions (e.g., thermal, and indoor air conditions, lighting, and noise) and interior design parameters (e.g., office layout, colors, furniture, access to views, distance to window, personal control and biophilic design) have been found to affect office workers' stress levels. A line of research based on the stress recovery theory offers new insights for establishing offices that limit environmental stress and help with work stress recovery. To that end, this paper answers ten questions that explore the relation between the indoor office-built environment and stress levels among workers. The answers to the ten questions are based on an extensive literature review to draw conclusions from what has been achieved to date. Thus, this study presents a foundation for future environmental stress related research in offices.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718
Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
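The "mix and match" of commercial AI services described above is, at heart, an adapter pattern over interchangeable cloud backends. The sketch below illustrates that pattern with an invented SpeechRecognizer protocol; it is not RIDE's or the VHToolkit's actual API.

    from typing import Protocol

    class SpeechRecognizer(Protocol):
        def transcribe(self, audio: bytes) -> str: ...

    class AgentPipeline:
        """A virtual-human pipeline that accepts any conforming backend
        (e.g., thin wrappers around AWS, Azure, or Google speech services)."""
        def __init__(self, recognizer: SpeechRecognizer):
            self.recognizer = recognizer

        def on_audio(self, audio: bytes) -> str:
            return self.recognizer.transcribe(audio)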
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1–6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence (Emotion, Social Behavior, and The Face), (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, keyword and author word clouds, as well as interactive evidence maps.}
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
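One plausible way to surface topic clusters such as Emotion, Social Behavior, and The Face from a harvested publication corpus is TF-IDF vectorization followed by k-means. The paper does not specify its pipeline, so the sketch below is an assumed illustration on a stand-in corpus, not the authors' method.
# Minimal sketch, assuming abstracts are available as plain strings.
# Illustrative only; the paper's actual clustering pipeline is not specified.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

abstracts = [
    "facial expression synthesis for embodied agents",
    "computational models of emotion and appraisal",
    "nonverbal social behavior generation in dyads",
]  # stand-in corpus; real input would be the harvested VH literature

vectorizer = TfidfVectorizer(stop_words="english")
X = vectorizer.fit_transform(abstracts)

km = KMeans(n_clusters=3, n_init=10, random_state=0).fit(X)
terms = vectorizer.get_feature_names_out()
for i, center in enumerate(km.cluster_centers_):
    top = [terms[j] for j in center.argsort()[::-1][:3]]
    print(f"cluster {i}: {top}")  # top TF-IDF terms characterize each cluster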
Adami, Pooya; Singh, Rashmi; Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students Journal Article
In: Advanced Engineering Informatics, vol. 55, pp. 101837, 2023, ISSN: 1474-0346.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{adami_participants_2023,
title = {Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students},
author = {Pooya Adami and Rashmi Singh and Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://www.sciencedirect.com/science/article/pii/S1474034622002956},
doi = {10.1016/j.aei.2022.101837},
issn = {1474-0346},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Advanced Engineering Informatics},
volume = {55},
pages = {101837},
abstract = {Virtual Reality (VR)-based training has gained attention from the scientific community in the Architecture, Engineering, and Construction (AEC) industry as a cost-effective and safe method that eliminates the safety risks that traditional training methods (e.g., in-person hands-on training, apprenticeship) may impose on workers during training. Although researchers have developed VR-based training for construction workers, some have recruited students rather than workers to understand the effect of their VR-based training. However, students are different from construction workers in many ways, which can threaten the validity of such studies. Hence, research is needed to investigate the extent to which the findings of a VR-based training study are contingent on whether students or construction workers were used as the study sample. This paper strives to compare the effectiveness of VR-based training on university students’ and construction workers’ knowledge acquisition, trust in the robot, and robot operation self-efficacy in remote operation of a construction robot. Twenty-five construction workers and twenty-five graduate construction engineering students were recruited to complete a VR-based training for remotely operating a demolition robot. We used quantitative analyses to answer our research questions. Our study shows that the results are dependent on the target sample in that students gained more knowledge, whereas construction workers gained more trust in the robot and more self-efficacy in robot operation. These findings suggest that the effectiveness of VR-based training on students may not necessarily be associated with its effectiveness on construction workers.}
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
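The between-sample comparison the abstract reports comes down to contrasting outcome measures across two groups of 25. A minimal sketch of that kind of analysis follows, using synthetic numbers rather than the study's data.
# Minimal sketch of a students-vs-workers comparison on one outcome
# (knowledge gain). All values below are made up for illustration and
# are not data from the study.
import numpy as np
from scipy.stats import ttest_ind

rng = np.random.default_rng(0)
students = rng.normal(loc=8.0, scale=2.0, size=25)  # hypothetical knowledge gains
workers = rng.normal(loc=6.5, scale=2.0, size=25)   # hypothetical knowledge gains

t, p = ttest_ind(students, workers)
print(f"t = {t:.2f}, p = {p:.3f}")
# A significant difference would suggest, as the paper argues, that
# training effects depend on which sample is recruited.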
Lei, Su; Gratch, Jonathan
Emotional Expressivity is a Reliable Signal of Surprise Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{lei_emotional_2023,
title = {Emotional Expressivity is a Reliable Signal of Surprise},
author = {Su Lei and Jonathan Gratch},
doi = {10.1109/TAFFC.2023.3234015},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1–12},
abstract = {We consider the problem of inferring what happened to a person in a social task from momentary facial reactions. To approach this, we introduce several innovations. First, rather than predicting what (observers think) someone feels, we predict objective features of the event that immediately preceded the facial reactions. Second, we draw on appraisal theory, a key psychological theory of emotion, to characterize features of this immediately preceding event. Specifically, we explore whether facial expressions reveal whether the event is expected, goal-congruent, and norm-compatible. Finally, we argue that emotional expressivity serves as a better feature for characterizing momentary expressions than traditional facial features. Specifically, we use supervised machine learning to predict third-party judgments of emotional expressivity with high accuracy, and show this model improves inferences about the nature of the event that preceded an emotional reaction. Contrary to common sense, “genuine smiles” failed to predict if an event advanced a person's goals. Rather, expressions best revealed if an event violated expectations. We discuss the implications of these findings for the interpretation of facial displays and potential limitations that could impact the generality of these findings.}
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
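The two-stage idea in the abstract, first learning to predict third-party expressivity ratings from facial features and then using predicted expressivity to infer properties of the preceding event, can be sketched as below. The data and feature choices are synthetic stand-ins, not the paper's models or dataset.
# Minimal sketch of a two-stage pipeline: facial features -> expressivity,
# then expressivity -> "did the event violate expectations?". Synthetic data;
# the feature set and models are illustrative assumptions.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
facial_feats = rng.normal(size=(200, 17))  # e.g., action-unit intensities (stand-in)
expressivity = facial_feats[:, 0] + rng.normal(scale=0.5, size=200)
violated = (expressivity > 0).astype(int)  # stand-in "expectation violated" label

# Stage 1: predict third-party expressivity judgments from facial features.
stage1 = RandomForestRegressor(random_state=0).fit(facial_feats, expressivity)
pred_expr = stage1.predict(facial_feats).reshape(-1, 1)

# Stage 2: use predicted expressivity as the feature for event inference.
stage2 = LogisticRegression().fit(pred_expr, violated)
print("train accuracy:", stage2.score(pred_expr, violated))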
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale M.; Gratch, Jonathan
Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{chawla_towards_2023,
title = {Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale M. Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/abstract/document/10021626},
doi = {10.1109/TAFFC.2023.3238007},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1–12},
abstract = {Negotiation is a complex social interaction that encapsulates emotional encounters in human decision-making. Virtual agents that can negotiate with humans by means of language are useful in pedagogy and conversational AI. To advance the development of such agents, we explore the role of emotion in the prediction of two important subjective goals in a negotiation – outcome satisfaction and partner perception. We devise ways to measure and compare different degrees of emotion expression in negotiation dialogues, consisting of emoticon, lexical, and contextual variables. Through an extensive analysis of a large-scale dataset in chat-based negotiations, we find that incorporating emotion expression explains significantly more variance, above and beyond the demographics and personality traits of the participants. Further, our temporal analysis reveals that emotive information from both early and later stages of the negotiation contributes to this prediction, indicating the need for a continual learning model of capturing emotion for automated agents. Finally, we extend our analysis to another dataset, showing promise that our findings generalize to more complex scenarios. We conclude by discussing our insights, which will be helpful for designing adaptive negotiation agents that interact through realistic communication interfaces.}
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
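The incremental-variance claim, that emotion expression explains outcome satisfaction above and beyond demographics and personality, corresponds to a hierarchical regression comparing a baseline model with an augmented one. A minimal sketch on synthetic data, not the paper's dataset or exact feature set:
# Minimal sketch of a hierarchical-regression comparison: baseline covariates
# vs. baseline plus emotion-expression features. All data is synthetic.
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
n = 300
demo_personality = rng.normal(size=(n, 5))  # stand-in demographics/personality
emotion_feats = rng.normal(size=(n, 3))     # e.g., emoticon/lexical/contextual (stand-in)
satisfaction = demo_personality[:, 0] + 2 * emotion_feats[:, 0] + rng.normal(size=n)

r2_base = LinearRegression().fit(demo_personality, satisfaction).score(
    demo_personality, satisfaction)
full_X = np.hstack([demo_personality, emotion_feats])
r2_full = LinearRegression().fit(full_X, satisfaction).score(full_X, satisfaction)
print(f"R^2 baseline: {r2_base:.3f}, with emotion features: {r2_full:.3f}")
# A meaningfully larger R^2 in the full model is the "above and beyond"
# effect the abstract reports.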
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{tak_is_2023,
title = {Is GPT a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
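The probing setup the abstract suggests, systematically varying one appraisal dimension of a situation and eliciting an emotion label from the model, can be sketched with a stand-in model call. The query_llm function below is a hypothetical placeholder, not a real API, and the vignette is invented for illustration.
# Minimal sketch of appraisal-dimension probing. `query_llm` is a
# hypothetical stub; a real study would call an actual chat-completion API.
def query_llm(prompt: str) -> str:
    # Stubbed for illustration; replace with a real model call.
    return "anger"

vignette = "A colleague takes credit for {who}'s work in a meeting."
for controllability in ("could easily have prevented it",
                        "had no way to prevent it"):
    prompt = (vignette.format(who="Alex")
              + f" Alex {controllability}. "
              + "In one word, what emotion does Alex most likely feel?")
    print(controllability, "->", query_llm(prompt))
# Comparing the elicited labels across the two controllability conditions
# against human-provided appraisals is the kind of componential test the
# paper describes.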
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
journal = {IEEE Transactions on Affective Computing},
pages = {1–14},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
volume = {1831},
pages = {530–535},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis Book Section
In: Kurosu, Masaaki; Hashizume, Ayako (Ed.): Human-Computer Interaction, vol. 14013, pp. 407–418, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35601-8 978-3-031-35602-5, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, Virtual Humans
@incollection{kurosu_relationship_2023,
title = {The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu and Ayako Hashizume},
url = {https://link.springer.com/10.1007/978-3-031-35602-5_29},
doi = {10.1007/978-3-031-35602-5_29},
isbn = {978-3-031-35601-8 978-3-031-35602-5},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Human-Computer Interaction},
volume = {14013},
pages = {407–418},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Gurney, Nikolos
The Design of Transparency Communication for Human-Multirobot Teams Book Section
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, vol. 14051, pp. 311–321, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35893-7 978-3-031-35894-4, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, DTIC, Virtual Humans
@incollection{degen_design_2023,
title = {The Design of Transparency Communication for Human-Multirobot Teams},
author = {Ning Wang and David V. Pynadath and Nikolos Gurney},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/10.1007/978-3-031-35894-4_23},
doi = {10.1007/978-3-031-35894-4_23},
isbn = {978-3-031-35893-7 978-3-031-35894-4},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {Artificial Intelligence in HCI},
volume = {14051},
pages = {311–321},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}