Publications
2024
Liu, Ruying; Becerik-Gerber, Burçin; Lucas, Gale M.; Busta, Kelly
Development of a VR Training Platform for Active Shooter Incident Preparedness in Healthcare Environments via a Stakeholder-Engaged Process Proceedings Article
In: Computing in Civil Engineering 2023, pp. 45–53, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8523-1.
@inproceedings{liu_development_2024,
title = {Development of a VR Training Platform for Active Shooter Incident Preparedness in Healthcare Environments via a Stakeholder-Engaged Process},
author = {Ruying Liu and Burçin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://ascelibrary.org/doi/10.1061/9780784485231.006},
doi = {10.1061/9780784485231.006},
isbn = {978-0-7844-8523-1},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {45–53},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Seyedrezaei, Mirmahdi; Becerik-Gerber, Burcin; Lucas, Gale
Investigating the Interplay between Indoor Environmental Quality and Workers’ Health and Productivity: Preliminary Results Proceedings Article
In: Computing in Civil Engineering 2023, pp. 614–622, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8524-8.
@inproceedings{awada_investigating_2024,
title = {Investigating the Interplay between Indoor Environmental Quality and Workers’ Health and Productivity: Preliminary Results},
author = {Mohamad Awada and Mirmahdi Seyedrezaei and Burcin Becerik-Gerber and Gale Lucas},
url = {https://ascelibrary.org/doi/10.1061/9780784485248.074},
doi = {10.1061/9780784485248.074},
isbn = {978-0-7844-8524-8},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
booktitle = {Computing in Civil Engineering 2023},
pages = {614–622},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 0360-1323.
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {0360-1323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier to aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to assess models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
@misc{chang_magicdance_2023,
title = {MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 0272-4944.
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
@article{awada_predicting_2023,
title = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://www.mdpi.com/1424-8220/23/21/8694},
doi = {10.3390/s23218694},
issn = {1424-8220},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
journal = {Sensors},
volume = {23},
number = {21},
pages = {8694},
abstract = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
@techreport{prinzing_can_2023,
title = {Can AI Agents Help Humans to Connect?},
author = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/muq6s},
doi = {10.31234/osf.io/muq6s},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
institution = {PsyArXiv},
abstract = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington, DC, USA, 2023, ISBN: 978-1-4503-9926-5.
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545–550},
publisher = {ACM},
address = {Washington, DC, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 979-8-4007-0321-8.
@inproceedings{ahmed_asar_2023,
title = {ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {979-8-4007-0321-8},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11–15},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 979-8-4007-0321-8.
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {979-8-4007-0321-8},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105–106},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris, France, 2023, ISBN: 979-8-4007-0055-2.
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {979-8-4007-0055-2},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406–415},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 0360-1323.
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg, Germany, 2023, ISBN: 978-1-4503-9994-4.
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1–2},
publisher = {ACM},
address = {Würzburg, Germany},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 2667-3053.
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chang, Di; Yin, Yufeng; Li, Zongjian; Tran, Minh; Soleymani, Mohammad
LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis Miscellaneous
2023, (arXiv:2308.10713 [cs]).
@misc{chang_libreface_2023,
title = {LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis},
author = {Di Chang and Yufeng Yin and Zongjian Li and Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.10713},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {Facial expression analysis is an important tool for human-computer interaction. In this paper, we introduce LibreFace, an open-source toolkit for facial expression analysis. This open-source toolbox offers real-time and offline analysis of facial behavior through deep learning models, including facial action unit (AU) detection, AU intensity estimation, and facial expression recognition. To accomplish this, we employ several techniques, including the utilization of a large-scale pre-trained network, feature-wise knowledge distillation, and task-specific fine-tuning. These approaches are designed to effectively and accurately analyze facial expressions by leveraging visual information, thereby facilitating the implementation of real-time interactive applications. In terms of Action Unit (AU) intensity estimation, we achieve a Pearson Correlation Coefficient (PCC) of 0.63 on DISFA, which is 7% higher than the performance of OpenFace 2.0 while maintaining highly-efficient inference that runs two times faster than OpenFace 2.0. Despite being compact, our model also demonstrates competitive performance to state-of-the-art facial expression analysis methods on AffectNet, FFHQ, and RAF-DB. Our code will be released at https://github.com/ihp-lab/LibreFace},
note = {arXiv:2308.10713 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yin, Yufeng; Chang, Di; Song, Guoxian; Sang, Shen; Zhi, Tiancheng; Liu, Jing; Luo, Linjie; Soleymani, Mohammad
FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features Miscellaneous
2023, (arXiv:2308.12380 [cs]).
@misc{yin_fg-net_2023,
title = {FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features},
author = {Yufeng Yin and Di Chang and Guoxian Song and Shen Sang and Tiancheng Zhi and Jing Liu and Linjie Luo and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.12380},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
abstract = {Automatic detection of facial Action Units (AUs) allows for objective facial expression analysis. Due to the high cost of AU labeling and the limited size of existing benchmarks, previous AU detection methods tend to overfit the dataset, resulting in a significant performance loss when evaluated across corpora. To address this problem, we propose FG-Net for generalizable facial action unit detection. Specifically, FG-Net extracts feature maps from a StyleGAN2 model pre-trained on a large and diverse face image dataset. Then, these features are used to detect AUs with a Pyramid CNN Interpreter, making the training efficient and capturing essential local features. The proposed FG-Net achieves a strong generalization ability for heatmap-based AU detection thanks to the generalizable and semantic-rich features extracted from the pre-trained generative model. Extensive experiments are conducted to evaluate within- and cross-corpus AU detection with the widely-used DISFA and BP4D datasets. Compared with the state-of-the-art, the proposed method achieves superior cross-domain performance while maintaining competitive within-domain performance. In addition, FG-Net is data-efficient and achieves competitive performance even when trained on 1000 samples. Our code will be released at https://github.com/ihp-lab/FG-Net},
note = {arXiv:2308.12380 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Leitner, Maxyn; Greenwald, Eric; Wang, Ning; Montgomery, Ryan; Merchant, Chirag
Designing Game-Based Learning for High School Artificial Intelligence Education Journal Article
In: Int J Artif Intell Educ, vol. 33, no. 2, pp. 384–398, 2023, ISSN: 1560-4292, 1560-4306.
@article{leitner_designing_2023,
title = {Designing Game-Based Learning for High School Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ning Wang and Ryan Montgomery and Chirag Merchant},
url = {https://link.springer.com/10.1007/s40593-022-00327-w},
doi = {10.1007/s40593-022-00327-w},
issn = {1560-4292, 1560-4306},
year = {2023},
date = {2023-06-01},
urldate = {2023-09-20},
journal = {Int J Artif Intell Educ},
volume = {33},
number = {2},
pages = {384–398},
abstract = {Artificial Intelligence (AI) permeates every aspect of our daily lives and is no longer a subject reserved for a select few in higher education but is essential knowledge that our youth need for the future. Much is unknown about the level of AI knowledge that is age and developmentally appropriate for high school, let alone about how to teach AI to even younger learners. In this theoretical paper, we discuss the design of a game-based learning environment for high school AI education, drawing upon insights gained from a prior cognitive interview study at a STEM focused private high school. We argue that game-based learning is an excellent fit for AI education due to the commonality of problem solving in both game playing and AI.},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
volume = {13916},
pages = {189–201},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-7281-6327-7.
@inproceedings{tran_speech_2023,
title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10095173/},
doi = {10.1109/ICASSP49357.2023.10095173},
isbn = {978-1-7281-6327-7},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1–5},
publisher = {IEEE},
address = {Rhodes Island, Greece},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
@article{aris_learning_2023,
title = {Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning},
author = {Timothy Aris and Volkan Ustun and Rajay Kumar},
url = {https://journals.flvc.org/FLAIRS/article/view/133348},
doi = {10.32473/flairs.36.133348},
issn = {2334-0762},
year = {2023},
date = {2023-05-01},
urldate = {2023-08-04},
journal = {FLAIRS},
volume = {36},
abstract = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. This work expands on previous work on raycast-based agents by increasing the number of enemies from one to three. We demonstrate an automated way of generating training and testing data within geo-specific terrains. We show that replacing the action space with a more abstracted, navmesh-based waypoint movement system can increase the generality and success rate of the models while providing similar results to our previous paper's results regarding retraining across terrains. We also comprehensively evaluate the differences between these and the previous models. Finally, we show that incorporating pixels into the model's input can increase performance at the cost of longer training times.},
keywords = {CogArch, Cognitive Architecture, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Murawski, Alaine; Ramirez-Zohfeld, Vanessa; Schierer, Allison; Olvera, Charles; Mell, Johnathan; Gratch, Jonathan; Brett, Jeanne; Lindquist, Lee A.
Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers Journal Article
In: Geriatrics, vol. 8, no. 2, pp. 36, 2023, ISSN: 2308-3417, (Number: 2, Publisher: Multidisciplinary Digital Publishing Institute).
@article{murawski_transforming_2023,
title = {Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers},
author = {Alaine Murawski and Vanessa Ramirez-Zohfeld and Allison Schierer and Charles Olvera and Johnathan Mell and Jonathan Gratch and Jeanne Brett and Lee A. Lindquist},
url = {https://www.mdpi.com/2308-3417/8/2/36},
doi = {10.3390/geriatrics8020036},
issn = {2308-3417},
year = {2023},
date = {2023-04-01},
urldate = {2023-03-31},
journal = {Geriatrics},
volume = {8},
number = {2},
pages = {36},
abstract = {Background: Family caregivers of older people with Alzheimer’s dementia (PWD) often need to advocate and resolve health-related conflicts (e.g., determining treatment necessity, billing errors, and home health extensions). As they deal with these health system conflicts, family caregivers experience unnecessary frustration, anxiety, and stress. The goal of this research was to apply a negotiation framework to resolve real-world family caregiver–older adult conflicts. Methods: We convened an interdisciplinary team of national community-based family caregivers, social workers, geriatricians, and negotiation experts (n = 9; Illinois, Florida, New York, and California) to examine the applicability of negotiation and conflict management frameworks to three older adult–caregiver conflicts (i.e., caregiver–older adult, caregiver–provider, and caregiver–caregiver). The panel of caregivers provided scenarios and dialogue describing conflicts they experienced in these three settings. A qualitative analysis was then performed grouping the responses into a framework matrix. Results: Upon presenting the three conflicts to the caregivers, 96 responses (caregiver–senior), 75 responses (caregiver–caregiver), and 80 responses (caregiver–provider) were generated. A thematic analysis showed that the statements and responses fit the interest–rights–power (IRP) negotiation framework. Discussion: The interests–rights–power (IRP) framework, used in business negotiations, provided insight into how caregivers experienced conflict with older adults, providers, and other caregivers. Future research is needed to examine applying the IRP framework in the training of caregivers of older people with Alzheimer’s dementia.},
note = {Number: 2
Publisher: Multidisciplinary Digital Publishing Institute},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
De Melo, Celso M.; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {Celso M. De Melo and Jonathan Gratch and Stacy Marsella and Catherine Pelachaud},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-04-01},
urldate = {2023-08-04},
journal = {Proc. IEEE},
pages = {1–16},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective Journal Article
In: Philosophical Transactions of the Royal Society B: Biological Sciences, vol. 378, no. 1875, pp. 20210475, 2023, (Publisher: Royal Society).
@article{gratch_promise_2023,
title = {The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective},
author = {Jonathan Gratch},
url = {https://royalsocietypublishing.org/doi/abs/10.1098/rstb.2021.0475},
doi = {10.1098/rstb.2021.0475},
year = {2023},
date = {2023-03-01},
urldate = {2023-03-31},
journal = {Philosophical Transactions of the Royal Society B: Biological Sciences},
volume = {378},
number = {1875},
pages = {20210475},
abstract = {In face-to-face interactions, parties rapidly react and adapt to each other's words, movements and expressions. Any science of face-to-face interaction must develop approaches to hypothesize and rigorously test mechanisms that explain such interdependent behaviour. Yet conventional experimental designs often sacrifice interactivity to establish experimental control. Interactive virtual and robotic agents have been offered as a way to study true interactivity while enforcing a measure of experimental control by allowing participants to interact with realistic but carefully controlled partners. But as researchers increasingly turn to machine learning to add realism to such agents, they may unintentionally distort the very interactivity they seek to illuminate, particularly when investigating the role of non-verbal signals such as emotion or active-listening behaviours. Here I discuss some of the methodological challenges that may arise when machine learning is used to model the behaviour of interaction partners. By articulating and explicitly considering these commitments, researchers can transform ‘unintentional distortions’ into valuable methodological tools that yield new insights and better contextualize existing experimental findings that rely on learning technology.
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.},
note = {Publisher: Royal Society},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
@inproceedings{georgila_considerations_2023,
title = {Considerations for Child Speech Synthesis for Dialogue Systems},
author = {Kallirroi Georgila},
url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
year = {2023},
date = {2023-03-01},
address = {Los Angeles, CA},
abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv, Version Number: 1).
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
year = {2023},
date = {2023-03-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Liu, Ruying; Seyedrezaei, Mirmahdi; Lu, Zheng; Xenakis, Matheos; Lucas, Gale; Roll, Shawn C.; Narayanan, Shrikanth
Ten questions concerning the impact of environmental stress on office workers Journal Article
In: Building and Environment, vol. 229, pp. 109964, 2023, ISSN: 0360-1323.
@article{awada_ten_2023,
title = {Ten questions concerning the impact of environmental stress on office workers},
author = {Mohamad Awada and Burcin Becerik-Gerber and Ruying Liu and Mirmahdi Seyedrezaei and Zheng Lu and Matheos Xenakis and Gale Lucas and Shawn C. Roll and Shrikanth Narayanan},
url = {https://www.sciencedirect.com/science/article/pii/S0360132322011945},
doi = {10.1016/j.buildenv.2022.109964},
issn = {0360-1323},
year = {2023},
date = {2023-02-01},
urldate = {2023-03-31},
journal = {Building and Environment},
volume = {229},
pages = {109964},
abstract = {We regularly face stress during our everyday activities, to the extent that stress is recognized by the World Health Organization as the epidemic of the 21st century. Stress is how humans respond physically and psychologically to adjustments, experiences, conditions, and circumstances in their lives. While there are many reasons for stress, work and job pressure remain the main cause. Thus, companies are increasingly interested in creating healthier, more comfortable, and stress-free offices for their workers. The indoor environment can induce environmental stress when it cannot satisfy the individual needs for health and comfort. In fact, office environmental conditions (e.g., thermal, and indoor air conditions, lighting, and noise) and interior design parameters (e.g., office layout, colors, furniture, access to views, distance to window, personal control and biophilic design) have been found to affect office workers' stress levels. A line of research based on the stress recovery theory offers new insights for establishing offices that limit environmental stress and help with work stress recovery. To that end, this paper answers ten questions that explore the relation between the indoor office-built environment and stress levels among workers. The answers to the ten questions are based on an extensive literature review to draw conclusions from what has been achieved to date. Thus, this study presents a foundation for future environmental stress related research in offices.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1–6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Singh, Rashmi; Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students Journal Article
In: Advanced Engineering Informatics, vol. 55, pp. 101837, 2023, ISSN: 1474-0346.
@article{adami_participants_2023,
title = {Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students},
author = {Pooya Adami and Rashmi Singh and Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://www.sciencedirect.com/science/article/pii/S1474034622002956},
doi = {10.1016/j.aei.2022.101837},
issn = {1474-0346},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Advanced Engineering Informatics},
volume = {55},
pages = {101837},
abstract = {Virtual Reality (VR)-based training has gained attention from the scientific community in the Architecture, Engineering, and Construction (AEC) industry as a cost-effective and safe method that eliminates the safety risks that may impose on workers during the training compared to traditional training methods (e.g., in-person hands-on training, apprenticeship). Although researchers have developed VR-based training for construction workers, some have recruited students rather than workers to understand the effect of their VR-based training. However, students are different from construction workers in many ways, which can threaten the validity of such studies. Hence, research is needed to investigate the extent to which the findings of a VR-based training study are contingent on whether students or construction workers were used as the study sample. This paper strives to compare the effectiveness of VR-based training on university students’ and construction workers’ knowledge acquisition, trust in the robot, and robot operation self-efficacy in remote operation of a construction robot. Twenty-five construction workers and twenty-five graduate construction engineering students were recruited to complete a VR-based training for remote operating a demolition robot. We used quantitative analyses to answer our research questions. Our study shows that the results are dependent on the target sample in that students gained more knowledge, whereas construction workers gained more trust in the robot and more self-efficacy in robot operation. These findings suggest that the effectiveness of VR-based training on students may not necessarily associate with its effectiveness on construction workers.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lei, Su; Gratch, Jonathan
Emotional Expressivity is a Reliable Signal of Surprise Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
@article{lei_emotional_2023,
title = {Emotional Expressivity is a Reliable Signal of Surprise},
author = {Su Lei and Jonathan Gratch},
doi = {10.1109/TAFFC.2023.3234015},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1–12},
abstract = {We consider the problem of inferring what happened to a person in a social task from momentary facial reactions. To approach this, we introduce several innovations. First, rather than predicting what (observers think) someone feels, we predict objective features of the event that immediately preceded the facial reactions. Second, we draw on appraisal theory, a key psychological theory of emotion, to characterize features of this immediately-preceded event. Specifically, we explore if facial expressions reveal if the event is expected, goal-congruent, and norm-compatible. Finally, we argue that emotional expressivity serves as a better feature for characterizing momentary expressions than traditional facial features. Specifically, we use supervised machine learning to predict third-party judgments of emotional expressivity with high accuracy, and show this model improves inferences about the nature of the event that preceded an emotional reaction. Contrary to common sense, “genuine smiles” failed to predict if an event advanced a person's goals. Rather, expressions best revealed if an event violated expectations. We discussed the implications of these findings for the interpretation of facial displays and potential limitations that could impact the generality of these findings.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale M.; Gratch, Jonathan
Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
@article{chawla_towards_2023,
title = {Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale M. Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/abstract/document/10021626},
doi = {10.1109/TAFFC.2023.3238007},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1–12},
abstract = {Negotiation is a complex social interaction that encapsulates emotional encounters in human decision-making. Virtual agents that can negotiate with humans by the means of language are useful in pedagogy and conversational AI. To advance the development of such agents, we explore the role of emotion in the prediction of two important subjective goals in a negotiation – outcome satisfaction and partner perception. We devise ways to measure and compare different degrees of emotion expression in negotiation dialogues, consisting of emoticon, lexical, and contextual variables. Through an extensive analysis of a large-scale dataset in chat-based negotiations, we find that incorporating emotion expression explains significantly more variance, above and beyond the demographics and personality traits of the participants. Further, our temporal analysis reveals that emotive information from both early and later stages of the negotiation contributes to this prediction, indicating the need for a continual learning model of capturing emotion for automated agents. Finally, we extend our analysis to another dataset, showing promise that our findings generalize to more complex scenarios. We conclude by discussing our insights, which will be helpful for designing adaptive negotiation agents that interact through realistic communication interfaces.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1, Publisher: Nature Publishing Group).
@article{lucas_customizing_2023,
title = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
author = {Gale M. Lucas and Johnathan Mell and Jill Boberg and Forrest Zenone and Ewart J. Visser and Chad Tossell and Todd Seech},
url = {https://www.nature.com/articles/s41598-022-27154-2},
doi = {10.1038/s41598-022-27154-2},
issn = {2045-2322},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Sci Rep},
volume = {13},
number = {1},
pages = {78},
abstract = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Rizzo, Albert A; Hartholt, Arno
Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application Proceedings Article
In: 2023.
Abstract | BibTeX | Tags: Virtual Humans, VR
@inproceedings{mozgai_persuasive_2023,
title = {Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application},
author = {Sharon Mozgai and Albert A Rizzo and Arno Hartholt},
year = {2023},
date = {2023-01-01},
abstract = {We are demoing Battle Buddy, an mHealth application designed to support access to physical and mental wellness content as well as safety planning for U.S. military veterans. This virtual human interface will collect multimodal data through passive sensors native to popular wearables (e.g., Apple Watch) and deliver adaptive multimedia content specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Battle Buddy can deliver health interventions matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). All interactions were specifically designed to engage and motivate by employing the persuasive strategies of (1) personalization, (2) self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise.},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Difede, JoAnn; Rothbaum, Barbara O.; Rizzo, Albert A.; Wyka, Katarzyna; Spielman, Lisa; Reist, Christopher; Roy, Michael J.; Jovanovic, Tanja; Norrholm, Seth D.; Cukor, Judith; Olden, Megan; Glatt, Charles E.; Lee, Francis S.
Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer Journal Article
In: Transl Psychiatry, vol. 12, no. 1, pp. 299, 2022, ISSN: 2158-3188.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, Virtual Humans
@article{difede_enhancing_2022,
title = {Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer},
author = {JoAnn Difede and Barbara O. Rothbaum and Albert A. Rizzo and Katarzyna Wyka and Lisa Spielman and Christopher Reist and Michael J. Roy and Tanja Jovanovic and Seth D. Norrholm and Judith Cukor and Megan Olden and Charles E. Glatt and Francis S. Lee},
url = {https://www.nature.com/articles/s41398-022-02066-x},
doi = {10.1038/s41398-022-02066-x},
issn = {2158-3188},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-13},
journal = {Transl Psychiatry},
volume = {12},
number = {1},
pages = {299},
abstract = {Posttraumatic stress disorder (PTSD) is a significant public health issue. Yet, there are limited treatment options and no data to suggest which treatment will work for whom. We tested the efficacy of virtual reality exposure (VRE) or prolonged imaginal exposure (PE), augmented with D-cycloserine (DCS) for combat-related PTSD. As an exploratory aim, we examined whether brain-derived neurotrophic factor (BDNF) and fatty acid amide hydrolase (FAAH) moderated treatment response. Military personnel with PTSD (n = 192) were recruited into a multisite double-blind randomized controlled trial to receive nine weeks of VRE or PE, with DCS or placebo. Primary outcome was the improvement in symptom severity. Randomization was stratified by comorbid depression (MDD) and site. Participants in both VRE and PE showed similar meaningful clinical improvement with no difference between the treatment groups. A significant interaction (p = 0.045) suggested VRE was more effective for depressed participants (CAPS difference M = 3.51 [95% CI 1.17–5.86]},
keywords = {DTIC, MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Becerik-Gerber, Burçin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah L; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Jazizadeh, Farrokh; Liu, Ruying; Zhu, Runhe; Marks, Frederick; Roll, Shawn; Seyedrezaei, Mirmahdi; Taylor, John E.; Höelscher, Christoph; Khan, Azam; Langevin, Jared; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Schaumann, Davide; Zhao, Jie
Ten questions concerning human-building interaction research for improving the quality of life Journal Article
In: Building and Environment, vol. 226, pp. 109681, 2022, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{becerik-gerber_ten_2022,
title = {Ten questions concerning human-building interaction research for improving the quality of life},
author = {Burçin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah L Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Farrokh Jazizadeh and Ruying Liu and Runhe Zhu and Frederick Marks and Shawn Roll and Mirmahdi Seyedrezaei and John E. Taylor and Christoph Höelscher and Azam Khan and Jared Langevin and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Davide Schaumann and Jie Zhao},
url = {https://www.sciencedirect.com/science/article/pii/S0360132322009118},
doi = {10.1016/j.buildenv.2022.109681},
issn = {0360-1323},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Building and Environment},
volume = {226},
pages = {109681},
abstract = {This paper seeks to address ten questions that explore the burgeoning field of Human-Building Interaction (HBI), an interdisciplinary field that represents the next frontier in convergent research and innovation to enable the dynamic interplay of human and building interactional intelligence. The field of HBI builds on several existing efforts in historically separate research fields/communities and aims to understand how buildings affect human outcomes and experiences, as well as how humans interact with, adapt to, and affect the built environment and its systems, to support buildings that can learn, enable adaptation, and evolve at different scales to improve the quality-of-life of its users while optimizing resource usage and service availability. Questions were developed by a diverse group of researchers with backgrounds in design, engineering, computer science, social science, and health science. Answers to these questions draw conclusions from what has been achieved to date as reported in the available literature and establish a foundation for future HBI research. This paper aims to encourage interdisciplinary collaborations in HBI research to change the way people interact with and perceive technology within the context of buildings and inform the design, construction, and operation of next-generation, intelligent built environments. In doing so, HBI research can realize a myriad of benefits for human users, including improved productivity, health, cognition, convenience, and comfort, all of which are essential to societal well-being.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Becerik-Gerber, Burcin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Höelscher, Christoph; Jazizadeh, Farrokh; Khan, Azam; Langevin, Jared; Liu, Ruying; Marks, Frederick; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Roll, Shawn; Schaumann, Davide; Seyedrezaei, Mirmahdi; Taylor, John E.; Zhao, Jie; Zhu, Runhe
The field of human building interaction for convergent research and innovation for intelligent built environments Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 22092, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{becerik-gerber_field_2022,
title = {The field of human building interaction for convergent research and innovation for intelligent built environments},
author = {Burcin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Christoph Höelscher and Farrokh Jazizadeh and Azam Khan and Jared Langevin and Ruying Liu and Frederick Marks and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Shawn Roll and Davide Schaumann and Mirmahdi Seyedrezaei and John E. Taylor and Jie Zhao and Runhe Zhu},
url = {https://www.nature.com/articles/s41598-022-25047-y},
doi = {10.1038/s41598-022-25047-y},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {22092},
abstract = {Human-Building Interaction (HBI) is a convergent field that represents the growing complexities of the dynamic interplay between human experience and intelligence within built environments. This paper provides core definitions, research dimensions, and an overall vision for the future of HBI as developed through consensus among 25 interdisciplinary experts in a series of facilitated workshops. Three primary areas contribute to and require attention in HBI research: humans (human experiences, performance, and well-being), buildings (building design and operations), and technologies (sensing, inference, and awareness). Three critical interdisciplinary research domains intersect these areas: control systems and decision making, trust and collaboration, and modeling and simulation. Finally, at the core, it is vital for HBI research to center on and support equity, privacy, and sustainability. Compelling research questions are posed for each primary area, research domain, and core principle. State-of-the-art methods used in HBI studies are discussed, and examples of original research are offered to illustrate opportunities for the advancement of HBI research.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato Mexico, 2022, ISBN: 978-1-4503-9888-6.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{feng_tool_2022,
title = {A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos},
author = {Andrew Feng and Samuel Shin and Youngwoo Yoon},
url = {https://dl.acm.org/doi/10.1145/3561975.3562953},
doi = {10.1145/3561975.3562953},
isbn = {978-1-4503-9888-6},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-04},
booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
pages = {1–7},
publisher = {ACM},
address = {Guanajuato Mexico},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pauw, Lisanne S.; Sauter, Disa A.; Kleef, Gerben A.; Lucas, Gale M.; Gratch, Jonathan; Fischer, Agneta H.
The avatar will see you now: Support from a virtual human provides socio-emotional benefits Journal Article
In: Computers in Human Behavior, vol. 136, pp. 107368, 2022, ISSN: 07475632.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{pauw_avatar_2022,
title = {The avatar will see you now: Support from a virtual human provides socio-emotional benefits},
author = {Lisanne S. Pauw and Disa A. Sauter and Gerben A. Kleef and Gale M. Lucas and Jonathan Gratch and Agneta H. Fischer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S074756322200190X},
doi = {10.1016/j.chb.2022.107368},
issn = {07475632},
year = {2022},
date = {2022-11-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {136},
pages = {107368},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Platforms and Tools for SIA Research and Development Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 261–304, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@incollection{hartholt_platforms_2022,
title = {Platforms and Tools for SIA Research and Development},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1145/3563659.3563668},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {261–304},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Lugrin, Birgit; Pelachaud, Catherine; André, Elisabeth; Aylett, Ruth; Bickmore, Timothy; Breazeal, Cynthia; Broekens, Joost; Dautenhahn, Kerstin; Gratch, Jonathan; Kopp, Stefan; Nadel, Jacqueline; Paiva, Ana; Wykowska, Agnieszka
Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 561–626, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{lugrin_challenge_2022,
title = {Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics},
author = {Birgit Lugrin and Catherine Pelachaud and Elisabeth André and Ruth Aylett and Timothy Bickmore and Cynthia Breazeal and Joost Broekens and Kerstin Dautenhahn and Jonathan Gratch and Stefan Kopp and Jacqueline Nadel and Ana Paiva and Agnieszka Wykowska},
url = {https://doi.org/10.1145/3563659.3563677},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {561–626},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Yin, Yufeng; Xu, Jiashu; Zu, Tianxin; Soleymani, Mohammad
X-Norm: Exchanging Normalization Parameters for Bimodal Fusion Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 605–614, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_x-norm_2022,
title = {X-Norm: Exchanging Normalization Parameters for Bimodal Fusion},
author = {Yufeng Yin and Jiashu Xu and Tianxin Zu and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3536221.3556581},
doi = {10.1145/3536221.3556581},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION},
pages = {605–614},
publisher = {ACM},
address = {Bengaluru India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 790–796, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{lu_deepmotion_2022,
title = {The DeepMotion entry to the GENEA Challenge 2022},
author = {Shuhong Lu and Andrew Feng},
url = {https://dl.acm.org/doi/10.1145/3536221.3558059},
doi = {10.1145/3536221.3558059},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION},
pages = {790–796},
publisher = {ACM},
address = {Bengaluru India},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Fast, Nathanael J.
The power to harm: AI assistants pave the way to unethical behavior Journal Article
In: Current Opinion in Psychology, vol. 47, pp. 101382, 2022, ISSN: 2352250X.
Links | BibTeX | Tags: AI, DTIC, Virtual Humans
@article{gratch_power_2022,
title = {The power to harm: AI assistants pave the way to unethical behavior},
author = {Jonathan Gratch and Nathanael J. Fast},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2352250X22001014},
doi = {10.1016/j.copsyc.2022.101382},
issn = {2352250X},
year = {2022},
date = {2022-10-01},
urldate = {2022-09-28},
journal = {Current Opinion in Psychology},
volume = {47},
pages = {101382},
keywords = {AI, DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_negotiation_2022,
title = {Negotiation game to introduce non-linear utility},
author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549678},
doi = {10.1145/3514197.3549678},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_preference_2022,
title = {Preference interdependencies in a multi-issue salary negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549681},
doi = {10.1145/3514197.3549681},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{lee_examining_2022,
title = {Examining the impact of emotion and agency on negotiator behavior},
author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549673},
doi = {10.1145/3514197.3549673},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {ACM},
address = {Faro Portugal},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Cognitive performance, creativity and stress levels of neurotypical young adults under different white noise levels Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 14566, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{awada_cognitive_2022,
title = {Cognitive performance, creativity and stress levels of neurotypical young adults under different white noise levels},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://www.nature.com/articles/s41598-022-18862-w},
doi = {10.1038/s41598-022-18862-w},
issn = {2045-2322},
year = {2022},
date = {2022-08-01},
urldate = {2023-03-31},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {14566},
abstract = {Noise is often considered a distractor; however, recent studies suggest that sub-attentive individuals or individuals diagnosed with attention deficit hyperactivity disorder can benefit from white noise to enhance their cognitive performance. Research regarding the effect of white noise on neurotypical adults presents mixed results; thus, the implications of white noise for the neurotypical population remain unclear. This study therefore investigates the effect of two white noise conditions, white noise at 45 dB and white noise at 65 dB, on the cognitive performance, creativity, and stress levels of neurotypical young adults in a private office space. These conditions are compared to a baseline condition in which participants are exposed to the office ambient noise. Our findings showed that white noise at 45 dB resulted in better cognitive performance in terms of sustained attention, accuracy, and speed of performance, as well as enhanced creativity and lower stress levels. On the other hand, the 65 dB white noise condition led to improved working memory but higher stress levels, which leads to the conclusion that different tasks might require different noise levels for optimal performance. These results lay the foundation for the integration of white noise into office workspaces as a tool to enhance office workers’ performance.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: Yearb Med Inform, vol. 31, no. 1, pp. 226–227, 2022, ISSN: 0943-4747, 2364-0502, (Publisher: Georg Thieme Verlag KG).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_heuristic_2022,
title = {Heuristic thinking and altruism toward machines in people impacted by COVID-19},
author = {Celso Melo and Jonathan Gratch and Frank Krueger},
url = {http://www.thieme-connect.de/DOI/DOI?10.1055/s-0042-1742544},
doi = {10.1055/s-0042-1742544},
issn = {0943-4747, 2364-0502},
year = {2022},
date = {2022-08-01},
urldate = {2023-03-31},
journal = {Yearb Med Inform},
volume = {31},
number = {1},
pages = {226–227},
abstract = {The authors conducted a study of how COVID-19 experiences shape human altruistic responses to machines, motivated by the advent of intelligent systems in everyday life (such as autonomous vehicles). They argue that human collaboration with machines, and the ways attitudes and behavior toward machines differ from social norms among humans, deserve more study. They draw on the ‘Computers as Social Actors’ theory of Reeves and Nass (1996), influential in human-computer and human-robot interaction research, which argues that people heuristically treat machines like people and that encouraging intuitive thinking, in contrast to deliberation, leads to increased cooperation in non-strategic settings. The authors are the first to apply and test this with concrete cognitive studies. The dictator game is used to measure altruism: the user can give tokens to another party, in this case a computer or a ‘human’ (both delivered by computer message to obscure the source). A diverse sample of 186 participants across 40 US states served as senders. They were administered the abbreviated Post-Traumatic Stress Disorder (PTSD) checklist (to measure COVID-19 impact) and three subjective scales to gain insight into mechanisms: the Cognitive Reflection Test, to measure whether those impacted engage in reduced reflection (i.e., more intuitive thinking), the Faith in Technology scale, and the Moral Foundations Questionnaire. Results showed that the usual bias against fairness toward machines diminished the more the user had been impacted by COVID-19. There were also sharp increases in intuitive (and incorrect) thinking and in faith in technology among the most highly affected group. Through multiple mediation analysis, the authors showed that faith in technology and heuristic thinking mediate the offer bias. They caution that, in times of stress, the disproportionate impact of COVID-19 on vulnerable groups creates a need for ethical guidelines and regulations to ensure that the altruism and cooperation shown to machines is well deserved. They also point out that factors such as individual stress propensity, education level, and socioeconomic status could make individuals susceptible to heuristic thinking, and that other social norms such as reciprocity, trust, and fairness may also shape collaboration with machines.},
note = {Publisher: Georg Thieme Verlag KG},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2022,
title = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
author = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
url = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Proceedings Article
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-6654-0540-9.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_pre-trained_2022,
title = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9747278/},
doi = {10.1109/ICASSP43922.2022.9747278},
isbn = {978-1-6654-0540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {4698–4702},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots Journal Article
In: J. Comput. Civ. Eng., vol. 36, no. 3, pp. 04022006, 2022, ISSN: 0887-3801, 1943-5487.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans, VR
@article{adami_impact_2022,
title = {Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots},
author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0001016},
doi = {10.1061/(ASCE)CP.1943-5487.0001016},
issn = {0887-3801, 1943-5487},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
journal = {J. Comput. Civ. Eng.},
volume = {36},
number = {3},
pages = {04022006},
keywords = {DTIC, UARC, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
Abstract | BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902–1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration & Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Fujiwara, Ken; Hoegen, Rens; Gratch, Jonathan; Dunbar, Norah E.
Synchrony facilitates altruistic decision making for non-human avatars Journal Article
In: Computers in Human Behavior, vol. 128, pp. 107079, 2022, ISSN: 07475632.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{fujiwara_synchrony_2022,
title = {Synchrony facilitates altruistic decision making for non-human avatars},
author = {Ken Fujiwara and Rens Hoegen and Jonathan Gratch and Norah E. Dunbar},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0747563221004027},
doi = {10.1016/j.chb.2021.107079},
issn = {07475632},
year = {2022},
date = {2022-03-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {128},
pages = {107079},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Visser, Ewart J.; Topoglu, Yigit; Joshi, Shawn; Krueger, Frank; Phillips, Elizabeth; Gratch, Jonathan; Tossell, Chad C.; Ayaz, Hasan
Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance Journal Article
In: Sensors, vol. 22, no. 3, pp. 1287, 2022, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@article{de_visser_designing_2022,
title = {Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance},
author = {Ewart J. Visser and Yigit Topoglu and Shawn Joshi and Frank Krueger and Elizabeth Phillips and Jonathan Gratch and Chad C. Tossell and Hasan Ayaz},
url = {https://www.mdpi.com/1424-8220/22/3/1287},
doi = {10.3390/s22031287},
issn = {1424-8220},
year = {2022},
date = {2022-02-01},
urldate = {2022-09-28},
journal = {Sensors},
volume = {22},
number = {3},
pages = {1287},
abstract = {To understand how to improve interactions with dog-like robots, we evaluated the importance of “dog-like” framing and physical appearance on interaction, hypothesizing multiple interactive benefits of each. We assessed whether framing Aibo as a puppy (i.e., in need of development) versus simply a robot would result in more positive responses and interactions. We also predicted that adding fur to Aibo would make it appear more dog-like, likable, and interactive. Twenty-nine participants engaged with Aibo in a 2 × 2 (framing × appearance) design by issuing commands to the robot. Aibo and participant behaviors were monitored per second, and evaluated via an analysis of commands issued, an analysis of command blocks (i.e., chains of commands), and using a T-pattern analysis of participant behavior. Participants were more likely to issue the “Come Here” command than other types of commands. When framed as a puppy, participants used Aibo’s dog name more often, praised it more, and exhibited more unique, interactive, and complex behavior with Aibo. Participants exhibited the most smiling and laughing behaviors with Aibo framed as a puppy without fur. Across conditions, after interacting with Aibo, participants felt Aibo was more trustworthy, intelligent, warm, and connected than at their initial meeting. This study shows the benefits of introducing a socially robotic agent with a particular frame and importance on realism (i.e., introducing the robot dog as a puppy) for more interactive engagement.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Baarslag, Tim; Kaisers, Michael; Gerding, Enrico H.; Jonker, Catholijn M.; Gratch, Jonathan
Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents Book Section
In: Karagözoğlu, Emin; Hyndman, Kyle B. (Ed.): Bargaining, pp. 387–406, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-76665-8 978-3-030-76666-5.
Links | BibTeX | Tags: Virtual Humans
@incollection{baarslag_self-sufficient_2022,
title = {Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents},
author = {Tim Baarslag and Michael Kaisers and Enrico H. Gerding and Catholijn M. Jonker and Jonathan Gratch},
editor = {Emin Karagözoğlu and Kyle B. Hyndman},
url = {https://link.springer.com/10.1007/978-3-030-76666-5_18},
doi = {10.1007/978-3-030-76666-5_18},
isbn = {978-3-030-76665-8 978-3-030-76666-5},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-27},
booktitle = {Bargaining},
pages = {387–406},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Hoegen, Jessie; DeVault, David; Gratch, Jonathan
Exploring the Function of Expressions in Negotiation: the DyNego-WOZ Corpus Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2022, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{hoegen_exploring_2022,
title = {Exploring the Function of Expressions in Negotiation: the DyNego-WOZ Corpus},
author = {Jessie Hoegen and David DeVault and Jonathan Gratch},
doi = {10.1109/TAFFC.2022.3223030},
issn = {1949-3045},
year = {2022},
date = {2022-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1–12},
abstract = {For affective computing to have an impact outside the laboratory, facial expressions must be studied in rich naturalistic situations. We argue negotiations are one such situation as they are ubiquitous in daily life, often evoke strong emotions, and perceived emotion shapes decisions and outcomes. Negotiations are a growing focus in AI research and applications, including agents that negotiate directly with people and attempt to use affective information. We introduce the DyNego-WOZ Corpus, which includes dyadic negotiation between participants and wizard-controlled virtual humans. We demonstrate the value of this corpus to the affective computing community by examining participants' facial expressions in response to a virtual human negotiation partner. We show that people's facial expressions typically co-occur with the end of their partner's speech (suggesting they reflect a reaction to the content of this speech), that these reactions do not correspond to prototypical emotional expressions, and that these reactions can help predict the expresser's subsequent action. We highlight challenges in working with such naturalistic data, including difficulties of expression recognition during speech, and the extreme variability of expressions, both across participants and within a negotiation. Our findings reinforce arguments that facial expressions convey more than emotional state but serve important communicative functions.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Shi, Weiyan; Zhang, Jingwen; Lucas, Gale; Yu, Zhou; Gratch, Jonathan
Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks Journal Article
In: 2022, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chawla_social_2022,
title = {Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks},
author = {Kushal Chawla and Weiyan Shi and Jingwen Zhang and Gale Lucas and Zhou Yu and Jonathan Gratch},
url = {https://arxiv.org/abs/2210.05664},
doi = {10.48550/ARXIV.2210.05664},
year = {2022},
date = {2022-01-01},
urldate = {2023-08-22},
abstract = {Dialogue systems capable of social influence such as persuasion, negotiation, and therapy, are essential for extending the use of technology to numerous realistic scenarios. However, existing research primarily focuses on either task-oriented or open-domain scenarios, a categorization that has been inadequate for capturing influence skills systematically. There exists no formal definition or category for dialogue systems with these skills and data-driven efforts in this direction are highly limited. In this work, we formally define and introduce the category of social influence dialogue systems that influence users' cognitive and emotional responses, leading to changes in thoughts, opinions, and behaviors through natural conversations. We present a survey of various tasks, datasets, and methods, compiling the progress across seven diverse domains. We discuss the commonalities and differences between the examined systems, identify limitations, and recommend future directions. This study serves as a comprehensive reference for social influence dialogue systems to inspire more dedicated research and discussion in this emerging area.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2021
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-6654-3311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-6654-3311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1–12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {DTIC, Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-6654-3176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_modeling_2021,
title = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
author = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9666955/},
doi = {10.1109/FG52635.2021.9666955},
isbn = {978-1-6654-3176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1–5},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-6654-3176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_self-supervised_2021,
title = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
author = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9667048/},
doi = {10.1109/FG52635.2021.9667048},
isbn = {978-1-6654-3176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1–8},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Schuller, Björn W.; Picard, Rosalind; André, Elisabeth; Gratch, Jonathan; Tao, Jianhua
Intelligent Signal Processing for Affective Computing [From the Guest Editors] Journal Article
In: IEEE Signal Process. Mag., vol. 38, no. 6, pp. 9–11, 2021, ISSN: 1053-5888, 1558-0792.
Links | BibTeX | Tags: Emotions, Virtual Humans
@article{schuller_intelligent_2021,
title = {Intelligent Signal Processing for Affective Computing [From the Guest Editors]},
author = {Björn W. Schuller and Rosalind Picard and Elisabeth André and Jonathan Gratch and Jianhua Tao},
url = {https://ieeexplore.ieee.org/document/9591500/},
doi = {10.1109/MSP.2021.3096415},
issn = {1053-5888, 1558-0792},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-29},
journal = {IEEE Signal Process. Mag.},
volume = {38},
number = {6},
pages = {9–11},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{kontogiorgos_systematic_2021,
title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
doi = {10.1145/3462244.3479887},
isbn = {978-1-4503-8481-0},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {112–120},
publisher = {ACM},
address = {Montréal QC Canada},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-6654-0019-0.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_contrastive_2021,
title = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
author = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9597453/},
doi = {10.1109/ACII52823.2021.9597453},
isbn = {978-1-6654-0019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1–8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.)
The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition Book
1, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
Links | BibTeX | Tags: Dialogue, Virtual Humans
@book{lugrin_handbook_2021,
title = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/book/10.1145/3477322},
doi = {10.1145/3477322},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {Dialogue, Virtual Humans},
pubstate = {published},
tppubtype = {book}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-6654-0019-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{chawla_towards_2021,
title = {Towards Emotion-Aware Agents For Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9597427/},
doi = {10.1109/ACII52823.2021.9597427},
isbn = {978-1-6654-0019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-27},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1–8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale
Rapport Between Humans and Socially Interactive Agents Book Section
In: Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.): The Handbook on Socially Interactive Agents, pp. 433–462, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
Links | BibTeX | Tags: Virtual Humans
@incollection{gratch_rapport_2021,
title = {Rapport Between Humans and Socially Interactive Agents},
author = {Jonathan Gratch and Gale Lucas},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/10.1145/3477322.3477335},
doi = {10.1145/3477322.3477335},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {The Handbook on Socially Interactive Agents},
pages = {433–462},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}