Publications
Search
Barrett, Trevor; Faulk, Robert; Sergeant, Army Master; Boberg, Jill; Bartels, Matthew; Colonel, Marine Lieutenant; Saxon, Leslie A.
Force plate assessments in reconnaissance marine training company Journal Article
In: BMC Sports Sci Med Rehabil, vol. 16, no. 1, pp. 16, 2024, ISSN: 2052-1847.
@article{barrett_force_2024,
title = {Force plate assessments in reconnaissance marine training company},
author = {Trevor Barrett and Robert Faulk and {Army Master Sergeant} and Jill Boberg and Matthew Bartels and {Marine Lieutenant Colonel} and Leslie A. Saxon},
url = {https://bmcsportsscimedrehabil.biomedcentral.com/articles/10.1186/s13102-023-00796-z},
doi = {10.1186/s13102-023-00796-z},
issn = {2052-1847},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-22},
journal = {BMC Sports Sci Med Rehabil},
volume = {16},
number = {1},
pages = {16},
abstract = {The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis Journal Article
In: PLoS ONE, vol. 19, no. 1, pp. e0296468, 2024, ISSN: 1932-6203.
@article{awada_stress_2024,
title = {Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale M. Lucas and Shawn C. Roll},
editor = {Iftikhar Ahmed Khan},
url = {https://dx.plos.org/10.1371/journal.pone.0296468},
doi = {10.1371/journal.pone.0296468},
issn = {1932-6203},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {PLoS ONE},
volume = {19},
number = {1},
pages = {e0296468},
abstract = {Previous studies have primarily focused on predicting stress arousal, encompassing physiological, behavioral, and psychological responses to stressors, while neglecting the examination of stress appraisal. Stress appraisal involves the cognitive evaluation of a situation as stressful or non-stressful, and as a threat/pressure or a challenge/opportunity. In this study, we investigated several research questions related to the association between states of stress appraisal (i.e., boredom, eustress, coexisting eustress-distress, distress) and various factors such as stress levels, mood, productivity, physiological and behavioral responses, as well as the most effective ML algorithms and data signals for predicting stress appraisal. The results support the Yerkes-Dodson law, showing that a moderate stress level is associated with increased productivity and positive mood, while low and high levels of stress are related to decreased productivity and negative mood, with distress overpowering eustress when they coexist. Changes in stress appraisal relative to physiological and behavioral features were examined through the lenses of stress arousal, activity engagement, and performance. An XGBOOST model achieved the best prediction accuracies of stress appraisal, reaching 82.78% when combining physiological and behavioral features and 79.55% using only the physiological dataset. The small accuracy difference of 3% indicates that physiological data alone may be adequate to accurately predict stress appraisal, and the feature importance results identified electrodermal activity, skin temperature, and blood volume pulse as the most useful physiologic features. Implementing these models within work environments can serve as a foundation for designing workplace policies, practices, and stress management strategies that prioritize the promotion of eustress while reducing distress and boredom. Such efforts can foster a supportive work environment to enhance employee well-being and productivity.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Spiegel, Brennan M. R.; Rizzo, Albert; Persky, Susan; Liran, Omer; Wiederhold, Brenda; Woods, Susan; Donovan, Kate; Sarkar, Korak; Xiang, Henry; Joo, Sun; Jotwani, Rohan; Lang, Min; Paul, Margot; Senter-Zapata, Mike; Widmeier, Keith; Zhang, Haipeng
What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field Journal Article
In: Journal of Medical Extended Reality, vol. 1, no. 1, pp. 4–12, 2024, ISSN: 2994-1520.
@article{spiegel_what_2024,
title = {What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field},
author = {Brennan M. R. Spiegel and Albert Rizzo and Susan Persky and Omer Liran and Brenda Wiederhold and Susan Woods and Kate Donovan and Korak Sarkar and Henry Xiang and Sun Joo and Rohan Jotwani and Min Lang and Margot Paul and Mike Senter-Zapata and Keith Widmeier and Haipeng Zhang},
url = {https://www.liebertpub.com/doi/10.1089/jmxr.2023.0012},
doi = {10.1089/jmxr.2023.0012},
issn = {2994-1520},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-20},
journal = {Journal of Medical Extended Reality},
volume = {1},
number = {1},
pages = {4--12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
@misc{chang_magicdance_2023,
title = {{MagicDance}: Realistic Human Dance Video Generation with Motions \& Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
eprint = {2311.12052},
eprinttype = {arXiv},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
eprint = {2311.03551},
eprinttype = {arXiv},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
eprint = {2311.10781},
eprinttype = {arXiv},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to asses models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart {HVAC} systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {0360-1323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in {North American Mediterranean} climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris, France, 2023, ISBN: 9798400700552.
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {9798400700552},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406--415},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and {OpenSense}: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105--106},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{ahmed_asar_2023,
title = {{ASAR} Dataset and Computational Model for Affective State Recognition During {ARAT} Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11--15},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
eprint = {2310.17194},
eprinttype = {arXiv},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545--550},
publisher = {ACM},
address = {Washington, DC, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
@techreport{prinzing_can_2023,
title = {Can {AI} Agents Help Humans to Connect?},
author = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
url = {https://osf.io/muq6s},
doi = {10.31234/osf.io/muq6s},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
institution = {PsyArXiv},
type = {Preprint},
abstract = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
eprint = {2310.14404},
eprinttype = {arXiv},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
@article{awada_predicting_2023,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
  title     = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
  journal   = {Sensors},
  volume    = {23},
  number    = {21},
  pages     = {8694},
  year      = {2023},
  date      = {2023-10-01},
  issn      = {1424-8220},
  doi       = {10.3390/s23218694},
  url       = {https://www.mdpi.com/1424-8220/23/21/8694},
  urldate   = {2023-12-07},
  abstract  = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gilani, Setareh Nasihati; Pollard, Kimberly; Traum, David
Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions Proceedings Article
In: International Conference on Multimodal Interaction, pp. 71–75, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{nasihati_gilani_multimodal_2023,
title = {Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions},
author = {Setareh Nasihati Gilani and Kimberly Pollard and David Traum},
url = {https://dl.acm.org/doi/10.1145/3610661.3617166},
doi = {10.1145/3610661.3617166},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {71--75},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Hudson, Taylor; Arstein, Ron; Voss, Clare; Traum, David
Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release Miscellaneous
2023, (arXiv:2310.17568 [cs]).
@misc{lukin_navigating_2023,
title = {Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Taylor Hudson and Ron Arstein and Clare Voss and David Traum},
url = {http://arxiv.org/abs/2310.17568},
eprint = {2310.17568},
eprinttype = {arXiv},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human-guided robotic exploration is a useful approach to gathering information at remote locations, especially those that might be too risky, inhospitable, or inaccessible for humans. Maintaining common ground between the remotely-located partners is a challenge, one that can be facilitated by multi-modal communication. In this paper, we explore how participants utilized multiple modalities to investigate a remote location with the help of a robotic partner. Participants issued spoken natural language instructions and received from the robot: text-based feedback, continuous 2D LIDAR mapping, and upon-request static photographs. We noticed that different strategies were adopted in terms of use of the modalities, and hypothesize that these differences may be correlated with success at several exploration sub-tasks. We found that requesting photos may have improved the identification and counting of some key entities (doorways in particular) and that this strategy did not hinder the amount of overall area exploration. Future work with larger samples may reveal the effects of more nuanced photo and dialogue strategies, which can inform the training of robotic agents. Additionally, we announce the release of our unique multi-modal corpus of human-robot communication in an exploration context: SCOUT, the Situated Corpus on Understanding Transactions.},
note = {arXiv:2310.17568 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2024
Barrett, Trevor; Faulk, Robert; Sergeant, Army Master; Boberg, Jill; Bartels, Matthew; Colonel, Marine Lieutenant; Saxon, Leslie A.
Force plate assessments in reconnaissance marine training company Journal Article
In: BMC Sports Sci Med Rehabil, vol. 16, no. 1, pp. 16, 2024, ISSN: 2052-1847.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{barrett_force_2024,
title = {Force plate assessments in reconnaissance marine training company},
author = {Trevor Barrett and Robert Faulk and {Army Master Sergeant} and Jill Boberg and Matthew Bartels and {Marine Lieutenant Colonel} and Leslie A. Saxon},
url = {https://bmcsportsscimedrehabil.biomedcentral.com/articles/10.1186/s13102-023-00796-z},
doi = {10.1186/s13102-023-00796-z},
issn = {2052-1847},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-22},
journal = {BMC Sports Sci Med Rehabil},
volume = {16},
number = {1},
pages = {16},
abstract = {The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis Journal Article
In: PLoS ONE, vol. 19, no. 1, pp. e0296468, 2024, ISSN: 1932-6203.
Abstract | Links | BibTeX | Tags: Machine Learning, UARC
@article{awada_stress_2024,
title = {Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis},
author = {Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
editor = {Iftikhar Ahmed Khan},
url = {https://dx.plos.org/10.1371/journal.pone.0296468},
doi = {10.1371/journal.pone.0296468},
issn = {1932-6203},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {PLoS ONE},
volume = {19},
number = {1},
pages = {e0296468},
abstract = {Previous studies have primarily focused on predicting stress arousal, encompassing physiological, behavioral, and psychological responses to stressors, while neglecting the examination of stress appraisal. Stress appraisal involves the cognitive evaluation of a situation as stressful or non-stressful, and as a threat/pressure or a challenge/opportunity. In this study, we investigated several research questions related to the association between states of stress appraisal (i.e., boredom, eustress, coexisting eustress-distress, distress) and various factors such as stress levels, mood, productivity, physiological and behavioral responses, as well as the most effective ML algorithms and data signals for predicting stress appraisal. The results support the Yerkes-Dodson law, showing that a moderate stress level is associated with increased productivity and positive mood, while low and high levels of stress are related to decreased productivity and negative mood, with distress overpowering eustress when they coexist. Changes in stress appraisal relative to physiological and behavioral features were examined through the lenses of stress arousal, activity engagement, and performance. An XGBOOST model achieved the best prediction accuracies of stress appraisal, reaching 82.78\% when combining physiological and behavioral features and 79.55\% using only the physiological dataset. The small accuracy difference of 3\% indicates that physiological data alone may be adequate to accurately predict stress appraisal, and the feature importance results identified electrodermal activity, skin temperature, and blood volume pulse as the most useful physiologic features. Implementing these models within work environments can serve as a foundation for designing workplace policies, practices, and stress management strategies that prioritize the promotion of eustress while reducing distress and boredom. Such efforts can foster a supportive work environment to enhance employee well-being and productivity.},
keywords = {Machine Learning, UARC},
pubstate = {published},
tppubtype = {article}
}
Spiegel, Brennan M. R.; Rizzo, Albert; Persky, Susan; Liran, Omer; Wiederhold, Brenda; Woods, Susan; Donovan, Kate; Sarkar, Korak; Xiang, Henry; Joo, Sun; Jotwani, Rohan; Lang, Min; Paul, Margot; Senter-Zapata, Mike; Widmeier, Keith; Zhang, Haipeng
What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field Journal Article
In: Journal of Medical Extended Reality, vol. 1, no. 1, pp. 4–12, 2024, ISSN: 2994-1520.
Links | BibTeX | Tags: MedVR, UARC
@article{spiegel_what_2024,
title = {What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field},
author = {Brennan M. R. Spiegel and Albert Rizzo and Susan Persky and Omer Liran and Brenda Wiederhold and Susan Woods and Kate Donovan and Korak Sarkar and Henry Xiang and Sun Joo and Rohan Jotwani and Min Lang and Margot Paul and Mike Senter-Zapata and Keith Widmeier and Haipeng Zhang},
url = {https://www.liebertpub.com/doi/10.1089/jmxr.2023.0012},
doi = {10.1089/jmxr.2023.0012},
issn = {2994-1520},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-20},
journal = {Journal of Medical Extended Reality},
volume = {1},
number = {1},
pages = {4--12},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
2023
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{chang_magicdance_2023,
title = {{MagicDance}: Realistic Human Dance Video Generation with Motions \& Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
eprint = {2311.12052},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
Abstract | Links | BibTeX | Tags: AI, UARC, Virtual Humans
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
eprint = {2311.03551},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
Abstract | Links | BibTeX | Tags: AI, Dialogue, UARC, Virtual Humans
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
eprint = {2311.10781},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to asses models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {AI, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {0360-1323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {9798400700552},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406--415},
publisher = {ACM},
address = {Paris France},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: AI, UARC, Virtual Humans
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105--106},
publisher = {ACM},
address = {Paris France},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ahmed_asar_2023,
title = {ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11--15},
publisher = {ACM},
address = {Paris France},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
eprint = {2310.17194},
eprinttype = {arXiv},
eprintclass = {eess},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545--550},
publisher = {ACM},
address = {Washington DC USA},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
Abstract | Links | BibTeX | Tags: AI, UARC, Virtual Humans
@techreport{prinzing_can_2023,
  title       = {Can AI Agents Help Humans to Connect?},
  author      = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
  url         = {https://osf.io/muq6s},
  doi         = {10.31234/osf.io/muq6s},
  year        = {2023},
  date        = {2023-10-01},
  urldate     = {2023-12-07},
  institution = {PsyArXiv},
  abstract    = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
  keywords    = {AI, UARC, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, UARC, Virtual Humans
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
eprint = {2310.14404},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: Machine Learning, UARC, Virtual Humans
@article{awada_predicting_2023,
  title     = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
  url       = {https://www.mdpi.com/1424-8220/23/21/8694},
  doi       = {10.3390/s23218694},
  issn      = {1424-8220},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  journal   = {Sensors},
  volume    = {23},
  number    = {21},
  pages     = {8694},
  abstract  = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
  keywords  = {Machine Learning, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gilani, Setareh Nasihati; Pollard, Kimberly; Traum, David
Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions Proceedings Article
In: International Conference on Multimodal Interaction, pp. 71–75, ACM, Paris France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{nasihati_gilani_multimodal_2023,
title = {Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions},
author = {Setareh Nasihati Gilani and Kimberly Pollard and David Traum},
url = {https://dl.acm.org/doi/10.1145/3610661.3617166},
doi = {10.1145/3610661.3617166},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {71--75},
publisher = {ACM},
address = {Paris France},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Hudson, Taylor; Artstein, Ron; Voss, Clare; Traum, David
Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release Miscellaneous
2023, (arXiv:2310.17568 [cs]).
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@misc{lukin_navigating_2023,
title = {Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Taylor Hudson and Ron Artstein and Clare Voss and David Traum},
url = {http://arxiv.org/abs/2310.17568},
eprint = {2310.17568},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human-guided robotic exploration is a useful approach to gathering information at remote locations, especially those that might be too risky, inhospitable, or inaccessible for humans. Maintaining common ground between the remotely-located partners is a challenge, one that can be facilitated by multi-modal communication. In this paper, we explore how participants utilized multiple modalities to investigate a remote location with the help of a robotic partner. Participants issued spoken natural language instructions and received from the robot: text-based feedback, continuous 2D LIDAR mapping, and upon-request static photographs. We noticed that different strategies were adopted in terms of use of the modalities, and hypothesize that these differences may be correlated with success at several exploration sub-tasks. We found that requesting photos may have improved the identification and counting of some key entities (doorways in particular) and that this strategy did not hinder the amount of overall area exploration. Future work with larger samples may reveal the effects of more nuanced photo and dialogue strategies, which can inform the training of robotic agents. Additionally, we announce the release of our unique multi-modal corpus of human-robot communication in an exploration context: SCOUT, the Situated Corpus on Understanding Transactions.},
note = {arXiv:2310.17568 [cs]},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {misc}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1--2},
publisher = {ACM},
address = {Würzburg Germany},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636--640},
publisher = {ISCA},
keywords = {Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 09257535.
Links | BibTeX | Tags: Simulation, UARC, virtual reality
@article{liu_effectiveness_2023,
title = {Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
doi = {10.1016/j.ssci.2023.106175},
issn = {0925-7535},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-22},
journal = {Safety Science},
volume = {164},
pages = {106175},
keywords = {Simulation, UARC, virtual reality},
pubstate = {published},
tppubtype = {article}
}
Kappas, Arvid; Gratch, Jonathan
These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI Journal Article
In: Affec Sci, 2023, ISSN: 2662-2041, 2662-205X.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{kappas_these_2023,
title = {These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI},
author = {Arvid Kappas and Jonathan Gratch},
url = {https://link.springer.com/10.1007/s42761-023-00211-3},
doi = {10.1007/s42761-023-00211-3},
issn = {2662-2041, 2662-205X},
year = {2023},
date = {2023-08-01},
urldate = {2023-09-20},
journal = {Affec Sci},
abstract = {AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.
Saxon, Leslie; Boberg, Jill; Faulk, Robert; Barrett, Trevor
Identifying relationships between compression garments and recovery in a military training environment Technical Report
In Review 2023.
Abstract | Links | BibTeX | Tags: CBC, UARC
@techreport{saxon_identifying_2023,
title = {Identifying relationships between compression garments and recovery in a military training environment},
author = {Leslie Saxon and Jill Boberg and Robert Faulk and Trevor Barrett},
url = {https://www.researchsquare.com/article/rs-3193173/v1},
doi = {10.21203/rs.3.rs-3193173/v1},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-21},
institution = {In Review},
abstract = {Development and maintenance of physical capabilities is an essential part of combat readiness in the military. This readiness requires continuous training and is therefore compromised by injury. Because Service Members (SMs) must be physically and cognitively prepared to conduct multifaceted operations in support of strategic objectives, and because the Department of Defense’s (DoD) non-deployable rate and annual costs associated with treating SMs continue to rise at an alarming rate, finding a far-reaching and efficient solution to prevent such injuries is a high priority. Compression garments (CGs) have become increasingly popular over the past decade in human performance applications, and reportedly facilitate post-exercise recovery by reducing muscle soreness, increasing blood lactate removal, and increasing perception of recovery, but the evidence is mixed, at best. In the current study we explored whether CG use, and duration of use, improves recovery and mitigates muscle soreness effectively in an elite Marine training course. In order to test this, we subjected Service Members to fatiguing exercise and then measured subjective and objective recovery and soreness using participant reports and grip and leg strength over a 72-hour recovery period. Findings from this study suggest that wearing CGs for post training recovery showed significant and moderate positive effects on subjective soreness, fatigue, and perceived level of recovery. We did not find statistically significant effects on physical performance while testing grip or leg strength. These findings suggest that CG may be a beneficial strategy for military training environments to accelerate muscle recovery after high-intensity exercise, without adverse effects to the wearer or negative impact on military training.},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {techreport}
}
Development and maintenance of physical capabilities is an essential part of combat readiness in the military. This readiness requires continuous training and is therefore compromised by injury. Because Service Members (SMs) must be physically and cognitively prepared to conduct multifaceted operations in support of strategic objectives, and because the Department of Defense’s (DoD) non-deployable rate and annual costs associated with treating SMs continue to rise at an alarming rate, finding a far-reaching and efficient solution to prevent such injuries is a high priority. Compression garments (CGs) have become increasingly popular over the past decade in human performance applications, and reportedly facilitate post-exercise recovery by reducing muscle soreness, increasing blood lactate removal, and increasing perception of recovery, but the evidence is mixed, at best. In the current study we explored whether CG use, and duration of use, improves recovery and mitigates muscle soreness effectively in an elite Marine training course. In order to test this, we subjected Service Members to fatiguing exercise and then measured subjective and objective recovery and soreness using participant reports and grip and leg strength over a 72-hour recovery period. Findings from this study suggest that wearing CGs for post training recovery showed significant and moderate positive effects on subjective soreness, fatigue, and perceived level of recovery. We did not find statistically significant effects on physical performance while testing grip or leg strength. These findings suggest that CG may be a beneficial strategy for military training environments to accelerate muscle recovery after high-intensity exercise, without adverse effects to the wearer or negative impact on military training.
Nye, Benjamin D.; Okado, Yuko; Shiel, Aaron; Carr, Kayla; Rosenberg, Milton; Rice, Enora; Ostrander, Luke; Ju, Megan; Gutierrez, Cassandra; Ramirez, Dilan; Auerbach, Daniel; Aguirre, Angelica; Swartout, William
MentorStudio: Amplifying diverse voices through rapid, self-authorable virtual mentors Journal Article
In: 2023, (Publisher: Zenodo).
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Agents
@article{nye_mentorstudio_2023,
title = {{MentorStudio}: Amplifying diverse voices through rapid, self-authorable virtual mentors},
author = {Benjamin D. Nye and Yuko Okado and Aaron Shiel and Kayla Carr and Milton Rosenberg and Enora Rice and Luke Ostrander and Megan Ju and Cassandra Gutierrez and Dilan Ramirez and Daniel Auerbach and Angelica Aguirre and William Swartout},
url = {https://zenodo.org/record/8226275},
doi = {10.5281/ZENODO.8226275},
year = {2023},
date = {2023-07-01},
urldate = {2024-01-11},
abstract = {Mentoring promotes underserved students' STEM persistence but it is difficult to scale up. Virtual agents can amplify mentors' experiences to larger audiences, which is particularly important for mentors from under-represented backgrounds and for underserved students with less access to mentors. This paper introduces MentorStudio, an online platform that allows real-life mentors to self-record and publish video-based conversational virtual agents. MentorStudio's goals are to increase speed, scheduling flexibility, and autonomy in creating intelligent virtual mentors. MentorStudio platform components are introduced, along with initial feedback regarding usability and acceptance collected from 20 STEM mentors who recorded virtual mentors. Overall, the MentorStudio platform has good ease-of-use and acceptance among mentors and offers a platform capable of recording large number of mentors to expand their reach to an unlimited number of students.},
note = {Publisher: Zenodo},
keywords = {Learning Sciences, UARC, Virtual Agents},
pubstate = {published},
tppubtype = {article}
}
Rodrigues, Patrick B.; Singh, Rashmi; Oytun, Mert; Adami, Pooya; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale M.
A multidimensional taxonomy for human-robot interaction in construction Journal Article
In: Automation in Construction, vol. 150, pp. 104845, 2023, ISSN: 0926-5805.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{rodrigues_multidimensional_2023,
  author    = {Patrick B. Rodrigues and Rashmi Singh and Mert Oytun and Pooya Adami and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale M. Lucas},
  title     = {A multidimensional taxonomy for human-robot interaction in construction},
  journal   = {Automation in Construction},
  volume    = {150},
  pages     = {104845},
  year      = {2023},
  date      = {2023-06-01},
  urldate   = {2023-03-31},
  issn      = {0926-5805},
  url       = {https://www.sciencedirect.com/science/article/pii/S092658052300105X},
  doi       = {10.1016/j.autcon.2023.104845},
  abstract  = {Despite the increased interest in construction robotics both in academia and the industry, insufficient attention has been given to aspects related to Human-Robot Interaction (HRI). Characterizing HRI for construction tasks can help researchers organize knowledge in a structured manner that allows for classifying construction robotics applications and comparing and benchmarking different studies. This paper builds upon existing taxonomies and empirical studies in HRI in various industries (e.g., construction, manufacturing, and military, among others) to propose a multidimensional taxonomy to characterize HRI applications in the construction industry. The taxonomy design followed a systematic literature review in which common themes were identified and grouped into 16 categories. The proposed taxonomy can be used as a foundation for systematic reviews and meta-analyses of HRI applications in construction and can benefit the construction industry by informing the design of collaborative tasks performed by human-robot teams.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_speech_2023,
title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10095173/},
doi = {10.1109/ICASSP49357.2023.10095173},
isbn = {978-1-72816-327-7},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
publisher = {IEEE},
address = {Rhodes Island, Greece},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
The Role of Heuristics and Biases during Complex Choices with an AI Teammate Journal Article
In: AAAI, vol. 37, no. 5, pp. 5993–6001, 2023, ISSN: 2374-3468, 2159-5399.
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@article{gurney_role_2023,
title = {The Role of Heuristics and Biases during Complex Choices with an {AI} Teammate},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/25741},
doi = {10.1609/aaai.v37i5.25741},
issn = {2374-3468, 2159-5399},
year = {2023},
date = {2023-06-01},
urldate = {2023-12-08},
journal = {AAAI},
volume = {37},
number = {5},
pages = {5993--6001},
abstract = {Behavioral scientists have classically documented aversion to algorithmic decision aids, from simple linear models to AI. Sentiment, however, is changing and possibly accelerating AI helper usage. AI assistance is, arguably, most valuable when humans must make complex choices. We argue that classic experimental methods used to study heuristics and biases are insufficient for studying complex choices made with AI helpers. We adapted an experimental paradigm designed for studying complex choices in such contexts. We show that framing and anchoring effects impact how people work with an AI helper and are predictive of choice outcomes. The evidence suggests that some participants, particularly those in a loss frame, put too much faith in the AI helper and experienced worse choice outcomes by doing so. The paradigm also generates computational modeling-friendly data allowing future studies of human-AI decision making.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Gibson, C. Michael; Steinhubl, Steven; Lakkireddy, Dhanunjaya; Turakhia, Mintu P.; Passman, Rod; Jones, W. Schuyler; Bunch, T. Jared; Curtis, Anne B.; Peterson, Eric D.; Ruskin, Jeremy; Saxon, Leslie; Tarino, Michael; Tarakji, Khaldoun G.; Marrouche, Nassir; Patel, Mithun; Harxhi, Ante; Kaul, Simrati; Nikolovski, Janeta; Juan, Stephanie; Wildenhaus, Kevin; Damaraju, C. V.; Spertus, John A.
Does early detection of atrial fibrillation reduce the risk of thromboembolic events? Rationale and design of the Heartline study Journal Article
In: American Heart Journal, vol. 259, pp. 30–41, 2023, ISSN: 0002-8703.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{gibson_does_2023,
title = {Does early detection of atrial fibrillation reduce the risk of thromboembolic events? Rationale and design of the {Heartline} study},
author = {C. Michael Gibson and Steven Steinhubl and Dhanunjaya Lakkireddy and Mintu P. Turakhia and Rod Passman and W. Schuyler Jones and T. Jared Bunch and Anne B. Curtis and Eric D. Peterson and Jeremy Ruskin and Leslie Saxon and Michael Tarino and Khaldoun G. Tarakji and Nassir Marrouche and Mithun Patel and Ante Harxhi and Simrati Kaul and Janeta Nikolovski and Stephanie Juan and Kevin Wildenhaus and C. V. Damaraju and John A. Spertus},
url = {https://www.sciencedirect.com/science/article/pii/S0002870323000145},
doi = {10.1016/j.ahj.2023.01.004},
issn = {0002-8703},
year = {2023},
date = {2023-05-01},
urldate = {2023-03-31},
journal = {American Heart Journal},
volume = {259},
pages = {30--41},
abstract = {Background
The impact of using direct-to-consumer wearable devices as a means to timely detect atrial fibrillation (AF) and to improve clinical outcomes is unknown.
Methods
Heartline is a pragmatic, randomized, and decentralized application-based trial of US participants aged ≥65 years. Two randomized cohorts include adults with possession of an iPhone and without a history of AF and those with a diagnosis of AF taking a direct oral anticoagulant (DOAC) for ≥30 days. Participants within each cohort are randomized (3:1) to either a core digital engagement program (CDEP) via iPhone application (Heartline application) and an Apple Watch (Apple Watch Group) or CDEP alone (iPhone-only Group). The Apple Watch Group has the watch irregular rhythm notification (IRN) feature enabled and access to the ECG application on the Apple Watch. If an IRN notification is issued for suspected AF then the study application instructs participants in the Apple Watch Group to seek medical care. All participants were “watch-naïve” at time of enrollment and have an option to either buy or loan an Apple Watch as part of this study. The primary end point is time from randomization to clinical diagnosis of AF, with confirmation by health care claims. Key secondary endpoint are claims-based incidence of a 6-component composite cardiovascular/systemic embolism/mortality event, DOAC medication use and adherence, costs/health resource utilization, and frequency of hospitalizations for bleeding. All study assessments, including patient-reported outcomes, are conducted through the study application. The target study enrollment is approximately 28,000 participants in total; at time of manuscript submission, a total of 26,485 participants have been enrolled into the study.
Conclusion
The Heartline Study will assess if an Apple Watch with the IRN and ECG application, along with application-facilitated digital health engagement modules, improves time to AF diagnosis and cardiovascular outcomes in a real-world environment.
Trial registration
ClinicalTrials.gov Identifier: NCT04276441.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
The impact of using direct-to-consumer wearable devices as a means to timely detect atrial fibrillation (AF) and to improve clinical outcomes is unknown.
Methods
Heartline is a pragmatic, randomized, and decentralized application-based trial of US participants aged ≥65 years. Two randomized cohorts include adults with possession of an iPhone and without a history of AF and those with a diagnosis of AF taking a direct oral anticoagulant (DOAC) for ≥30 days. Participants within each cohort are randomized (3:1) to either a core digital engagement program (CDEP) via iPhone application (Heartline application) and an Apple Watch (Apple Watch Group) or CDEP alone (iPhone-only Group). The Apple Watch Group has the watch irregular rhythm notification (IRN) feature enabled and access to the ECG application on the Apple Watch. If an IRN notification is issued for suspected AF then the study application instructs participants in the Apple Watch Group to seek medical care. All participants were “watch-naïve” at time of enrollment and have an option to either buy or loan an Apple Watch as part of this study. The primary end point is time from randomization to clinical diagnosis of AF, with confirmation by health care claims. Key secondary endpoint are claims-based incidence of a 6-component composite cardiovascular/systemic embolism/mortality event, DOAC medication use and adherence, costs/health resource utilization, and frequency of hospitalizations for bleeding. All study assessments, including patient-reported outcomes, are conducted through the study application. The target study enrollment is approximately 28,000 participants in total; at time of manuscript submission, a total of 26,485 participants have been enrolled into the study.
Conclusion
The Heartline Study will assess if an Apple Watch with the IRN and ECG application, along with application-facilitated digital health engagement modules, improves time to AF diagnosis and cardiovascular outcomes in a real-world environment.
Trial registration
ClinicalTrials.gov Identifier: NCT04276441.
Pal, Debaditya; Leuski, Anton; Traum, David
Comparing Statistical Models for Retrieval based Question-answering Dialogue: BERT vs Relevance Models Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@article{pal_comparing_2023,
title = {Comparing Statistical Models for Retrieval based Question-answering Dialogue: {BERT} vs Relevance Models},
author = {Debaditya Pal and Anton Leuski and David Traum},
url = {https://journals.flvc.org/FLAIRS/article/view/133386},
doi = {10.32473/flairs.36.133386},
issn = {2334-0762},
year = {2023},
date = {2023-05-01},
urldate = {2023-08-23},
journal = {FLAIRS},
volume = {36},
abstract = {In this paper, we compare the performance of four models in a retrieval based question answering dialogue task on two moderately sized corpora ({\textasciitilde}10,000 utterances). One model is a statistical model and uses cross language relevance while the others are deep neural networks utilizing the BERT architecture along with different retrieval methods. The statistical model has previously outperformed LSTM based neural networks in a similar task whereas BERT has been proven to perform well on a variety of NLP tasks, achieving state-of-the-art results in many of them. Results show that the statistical cross language relevance model outperforms the BERT based architectures in learning question-answer mappings. BERT achieves better results by mapping new questions to existing questions.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {article}
}
Chadalapaka, Viswanath; Ustun, Volkan; Liu, Lixing
Leveraging Graph Networks to Model Environments in Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC
@article{chadalapaka_leveraging_2023,
  author    = {Viswanath Chadalapaka and Volkan Ustun and Lixing Liu},
  title     = {Leveraging Graph Networks to Model Environments in Reinforcement Learning},
  journal   = {FLAIRS},
  volume    = {36},
  year      = {2023},
  date      = {2023-05-01},
  urldate   = {2023-08-04},
  issn      = {2334-0762},
  url       = {https://journals.flvc.org/FLAIRS/article/view/133118},
  doi       = {10.32473/flairs.36.133118},
  abstract  = {This paper proposes leveraging graph neural networks (GNNs) to model an agent’s environment to construct superior policy networks in reinforcement learning (RL). To this end, we explore the effects of different combinations of GNNs and graph network pooling functions on policy performance. We also run experiments at different levels of problem complexity, which affect how easily we expect an agent to learn an optimal policy and therefore show whether or not graph networks are effective at various problem complexity levels. The efficacy of our approach is shown via experimentation in a partially-observable, non-stationary environment that parallels the highly-practical scenario of a military training exercise with human trainees, where the learning goal is to become the best sparring partner possible for human trainees. Our results present that our models can generate better-performing sparring partners by employing GNNs, as demonstrated by these experiments in the proof-of-concept environment. We also explore our model’s applicability in Multi-Agent RL scenarios. Our code is available online at https://github.com/Derposoft/GNNsAsEnvs.},
  keywords  = {CogArch, Cognitive Architecture, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@article{aris_learning_2023,
  author    = {Timothy Aris and Volkan Ustun and Rajay Kumar},
  title     = {Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning},
  journal   = {FLAIRS},
  volume    = {36},
  year      = {2023},
  date      = {2023-05-01},
  urldate   = {2023-08-04},
  issn      = {2334-0762},
  url       = {https://journals.flvc.org/FLAIRS/article/view/133348},
  doi       = {10.32473/flairs.36.133348},
  abstract  = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. This work expands on previous work on raycast-based agents by increasing the number of enemies from one to three. We demonstrate an automated way of generating training and testing data within geo-specific terrains. We show that replacing the action space with a more abstracted, navmesh-based waypoint movement system can increase the generality and success rate of the models while providing similar results to our previous paper's results regarding retraining across terrains. We also comprehensively evaluate the differences between these and the previous models. Finally, we show that incorporating pixels into the model's input can increase performance at the cost of longer training times.},
  keywords  = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Murawski, Alaine; Ramirez-Zohfeld, Vanessa; Schierer, Allison; Olvera, Charles; Mell, Johnathan; Gratch, Jonathan; Brett, Jeanne; Lindquist, Lee A.
Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers Journal Article
In: Geriatrics, vol. 8, no. 2, pp. 36, 2023, ISSN: 2308-3417, (Number: 2 Publisher: Multidisciplinary Digital Publishing Institute).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{murawski_transforming_2023,
  author    = {Alaine Murawski and Vanessa Ramirez-Zohfeld and Allison Schierer and Charles Olvera and Johnathan Mell and Jonathan Gratch and Jeanne Brett and Lee A. Lindquist},
  title     = {Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers},
  journal   = {Geriatrics},
  volume    = {8},
  number    = {2},
  pages     = {36},
  year      = {2023},
  date      = {2023-04-01},
  urldate   = {2023-03-31},
  issn      = {2308-3417},
  url       = {https://www.mdpi.com/2308-3417/8/2/36},
  doi       = {10.3390/geriatrics8020036},
  abstract  = {Background: Family caregivers of older people with Alzheimer’s dementia (PWD) often need to advocate and resolve health-related conflicts (e.g., determining treatment necessity, billing errors, and home health extensions). As they deal with these health system conflicts, family caregivers experience unnecessary frustration, anxiety, and stress. The goal of this research was to apply a negotiation framework to resolve real-world family caregiver–older adult conflicts. Methods: We convened an interdisciplinary team of national community-based family caregivers, social workers, geriatricians, and negotiation experts (n = 9; Illinois, Florida, New York, and California) to examine the applicability of negotiation and conflict management frameworks to three older adult–caregiver conflicts (i.e., caregiver–older adult, caregiver–provider, and caregiver–caregiver). The panel of caregivers provided scenarios and dialogue describing conflicts they experienced in these three settings. A qualitative analysis was then performed grouping the responses into a framework matrix. Results: Upon presenting the three conflicts to the caregivers, 96 responses (caregiver–senior), 75 responses (caregiver–caregiver), and 80 responses (caregiver–provider) were generated. A thematic analysis showed that the statements and responses fit the interest–rights–power (IRP) negotiation framework. Discussion: The interests–rights–power (IRP) framework, used in business negotiations, provided insight into how caregivers experienced conflict with older adults, providers, and other caregivers. Future research is needed to examine applying the IRP framework in the training of caregivers of older people with Alzheimer’s dementia.},
  note      = {Number: 2 Publisher: Multidisciplinary Digital Publishing Institute},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Liu, Ruying; Zhu, Runhe; Becerik-Gerber, Burcin; Lucas, Gale M.; Southers, Erroll G.
Be prepared: How training and emergency type affect evacuation behaviour Journal Article
In: Computer Assisted Learning, pp. jcal.12812, 2023, ISSN: 0266-4909, 1365-2729.
Abstract | Links | BibTeX | Tags: Simulation, UARC
@article{liu_be_2023,
title = {Be prepared: How training and emergency type affect evacuation behaviour},
author = {Ruying Liu and Runhe Zhu and Burcin Becerik-Gerber and Gale M. Lucas and Erroll G. Southers},
url = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12812},
doi = {10.1111/jcal.12812},
issn = {0266-4909, 1365-2729},
year = {2023},
date = {2023-04-01},
urldate = {2023-08-22},
journal = {Computer Assisted Learning},
pages = {jcal.12812},
abstract = {Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.},
keywords = {Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
,
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.
Gratch, Jonathan
The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective Journal Article
In: Philosophical Transactions of the Royal Society B: Biological Sciences, vol. 378, no. 1875, pp. 20210475, 2023, (Publisher: Royal Society).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{gratch_promise_2023,
  title     = {The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective},
  author    = {Jonathan Gratch},
  url       = {https://royalsocietypublishing.org/doi/abs/10.1098/rstb.2021.0475},
  doi       = {10.1098/rstb.2021.0475},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  journal   = {Philosophical Transactions of the Royal Society B: Biological Sciences},
  volume    = {378},
  number    = {1875},
  pages     = {20210475},
  publisher = {Royal Society},
  abstract  = {In face-to-face interactions, parties rapidly react and adapt to each other's words, movements and expressions. Any science of face-to-face interaction must develop approaches to hypothesize and rigorously test mechanisms that explain such interdependent behaviour. Yet conventional experimental designs often sacrifice interactivity to establish experimental control. Interactive virtual and robotic agents have been offered as a way to study true interactivity while enforcing a measure of experimental control by allowing participants to interact with realistic but carefully controlled partners. But as researchers increasingly turn to machine learning to add realism to such agents, they may unintentionally distort the very interactivity they seek to illuminate, particularly when investigating the role of non-verbal signals such as emotion or active-listening behaviours. Here I discuss some of the methodological challenges that may arise when machine learning is used to model the behaviour of interaction partners. By articulating and explicitly considering these commitments, researchers can transform ‘unintentional distortions’ into valuable methodological tools that yield new insights and better contextualize existing experimental findings that rely on learning technology.
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.
Pynadath, David V.; Dilkina, Bistra; Jeong, David C.; John, Richard S.; Marsella, Stacy C.; Merchant, Chirag; Miller, Lynn C.; Read, Stephen J.
Disaster world Journal Article
In: Comput Math Organ Theory, vol. 29, no. 1, pp. 84–117, 2023, ISSN: 1572-9346.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@article{pynadath_disaster_2023,
  title     = {Disaster world},
  author    = {David V. Pynadath and Bistra Dilkina and David C. Jeong and Richard S. John and Stacy C. Marsella and Chirag Merchant and Lynn C. Miller and Stephen J. Read},
  url       = {https://doi.org/10.1007/s10588-022-09359-y},
  doi       = {10.1007/s10588-022-09359-y},
  issn      = {1572-9346},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  journal   = {Computational and Mathematical Organization Theory},
  volume    = {29},
  number    = {1},
  pages     = {84--117},
  abstract  = {Artificial intelligence (AI) research provides a rich source of modeling languages capable of generating socially plausible simulations of human behavior, while also providing a transparent ground truth that can support validation of social-science methods applied to that simulation. In this work, we leverage two established AI representations: decision-theoretic planning and recursive modeling. Decision-theoretic planning (specifically Partially Observable Markov Decision Processes) provides agents with quantitative models of their corresponding real-world entities’ subjective (and possibly incorrect) perspectives of ground truth in the form of probabilistic beliefs and utility functions. Recursive modeling gives an agent a theory of mind, which is necessary when a person’s (again, possibly incorrect) subjective perspectives are of another person, rather than of just his/her environment. We used PsychSim, a multiagent social-simulation framework combining these two AI frameworks, to build a general parameterized model of human behavior during disaster response, grounding the model in social-psychological theories to ensure social plausibility. We then instantiated that model into alternate ground truths for simulating population response to a series of natural disasters, namely, hurricanes. The simulations generate data in response to socially plausible instruments (e.g., surveys) that serve as input to the Ground Truth program’s designated research teams for them to conduct simulated social science. The simulation also provides a graphical ground truth and a set of outcomes to be used as the gold standard in evaluating the research teams’ inferences.},
  keywords  = {Social Simulation, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Hsu, Wan-Yu; Anguera, Joaquin A.; Rizzo, Albert; Campusano, Richard; Chiaravalloti, Nancy D.; DeLuca, John; Gazzaley, Adam; Bove, Riley M.
A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study Journal Article
In: Frontiers in Human Neuroscience, 2023, (Place: Lausanne, Switzerland Publisher: Frontiers Research Foundation Section: ORIGINAL RESEARCH article).
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{hsu_virtual_2023,
  title     = {A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study},
  author    = {Wan-Yu Hsu and Joaquin A. Anguera and Albert Rizzo and Richard Campusano and Nancy D. Chiaravalloti and John DeLuca and Adam Gazzaley and Riley M. Bove},
  url       = {https://www.proquest.com/docview/2787027204/abstract/BEA88F7BB72B4623PQ/1},
  doi       = {10.3389/fnhum.2023.1139316},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  journal   = {Frontiers in Human Neuroscience},
  publisher = {Frontiers Research Foundation},
  abstract  = {Introduction: Cognitive impairment is a debilitating symptom in people with multiple sclerosis (MS). Most of the neuropsychological tasks have little resemblance to everyday life. There is a need for ecologically valid tools for assessing cognition in real-life functional contexts in MS. One potential solution would involve the use of virtual reality (VR) to exert finer control over the task presentation environment; however, VR studies in the MS population are scarce. Objectives: To explore the utility and feasibility of a VR program for cognitive assessment in MS. Methods: A VR classroom embedded with a continuous performance task (CPT) was assessed in 10 non-MS adults and 10 people with MS with low cognitive functioning. Participants performed the CPT with distractors (ie. WD) and without distractors (ie. ND). The Symbol Digit Modalities Test (SDMT), California Verbal Learning Test – II (CVLT-II), and a feedback survey on the VR program were administered. Results: People with MS exhibited greater reaction time variability (RTV) compared to non-MS participants, and greater RTV in both WD and ND conditions was associated with lower SDMT. Conclusions: VR tools warrant further research to determine their value as an ecologically valid platform for assessing cognition and everyday functioning in people with MS.},
  note      = {Place: Lausanne, Switzerland
Section: ORIGINAL RESEARCH article},
  keywords  = {MedVR, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Gordon, Andrew S.; Feng, Andrew
Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction Proceedings Article
In: 2023 57th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Baltimore, MD, USA, 2023, ISBN: 978-1-66545-181-9.
Links | BibTeX | Tags: Narrative, UARC
@inproceedings{gordon_searching_2023,
  title     = {Searching for the Most Probable Combination of Class Labels Using {Etcetera Abduction}},
  author    = {Andrew S. Gordon and Andrew Feng},
  url       = {https://ieeexplore.ieee.org/document/10089729/},
  doi       = {10.1109/CISS56502.2023.10089729},
  isbn      = {978-1-66545-181-9},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-08-07},
  booktitle = {2023 57th Annual Conference on Information Sciences and Systems (CISS)},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Baltimore, MD, USA},
  keywords  = {Narrative, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Liu, Ruying; Seyedrezaei, Mirmahdi; Lu, Zheng; Xenakis, Matheos; Lucas, Gale; Roll, Shawn C.; Narayanan, Shrikanth
Ten questions concerning the impact of environmental stress on office workers Journal Article
In: Building and Environment, vol. 229, pp. 109964, 2023, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{awada_ten_2023,
  title     = {Ten questions concerning the impact of environmental stress on office workers},
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Ruying Liu and Mirmahdi Seyedrezaei and Zheng Lu and Matheos Xenakis and Gale Lucas and Shawn C. Roll and Shrikanth Narayanan},
  url       = {https://www.sciencedirect.com/science/article/pii/S0360132322011945},
  doi       = {10.1016/j.buildenv.2022.109964},
  issn      = {0360-1323},
  year      = {2023},
  date      = {2023-02-01},
  urldate   = {2023-03-31},
  journal   = {Building and Environment},
  volume    = {229},
  pages     = {109964},
  abstract  = {We regularly face stress during our everyday activities, to the extent that stress is recognized by the World Health Organization as the epidemic of the 21st century. Stress is how humans respond physically and psychologically to adjustments, experiences, conditions, and circumstances in their lives. While there are many reasons for stress, work and job pressure remain the main cause. Thus, companies are increasingly interested in creating healthier, more comfortable, and stress-free offices for their workers. The indoor environment can induce environmental stress when it cannot satisfy the individual needs for health and comfort. In fact, office environmental conditions (e.g., thermal, and indoor air conditions, lighting, and noise) and interior design parameters (e.g., office layout, colors, furniture, access to views, distance to window, personal control and biophilic design) have been found to affect office workers' stress levels. A line of research based on the stress recovery theory offers new insights for establishing offices that limit environmental stress and help with work stress recovery. To that end, this paper answers ten questions that explore the relation between the indoor office-built environment and stress levels among workers. The answers to the ten questions are based on an extensive literature review to draw conclusions from what has been achieved to date. Thus, this study presents a foundation for future environmental stress related research in offices.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Adami, Pooya; Singh, Rashmi; Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
In: Advanced Engineering Informatics, vol. 55, pp. 101837, 2023, ISSN: 1474-0346.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{adami_participants_2023,
  title     = {Participants matter: Effectiveness of {VR}-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students},
  author    = {Pooya Adami and Rashmi Singh and Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
  url       = {https://www.sciencedirect.com/science/article/pii/S1474034622002956},
  doi       = {10.1016/j.aei.2022.101837},
  issn      = {1474-0346},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  journal   = {Advanced Engineering Informatics},
  volume    = {55},
  pages     = {101837},
  abstract  = {Virtual Reality (VR)-based training has gained attention from the scientific community in the Architecture, Engineering, and Construction (AEC) industry as a cost-effective and safe method that eliminates the safety risks that may impose on workers during the training compared to traditional training methods (e.g., in-person hands-on training, apprenticeship). Although researchers have developed VR-based training for construction workers, some have recruited students rather than workers to understand the effect of their VR-based training. However, students are different from construction workers in many ways, which can threaten the validity of such studies. Hence, research is needed to investigate the extent to which the findings of a VR-based training study are contingent on whether students or construction workers were used as the study sample. This paper strives to compare the effectiveness of VR-based training on university students’ and construction workers’ knowledge acquisition, trust in the robot, and robot operation self-efficacy in remote operation of a construction robot. Twenty-five construction workers and twenty-five graduate construction engineering students were recruited to complete a VR-based training for remote operating a demolition robot. We used quantitative analyses to answer our research questions. Our study shows that the results are dependent on the target sample in that students gained more knowledge, whereas construction workers gained more trust in the robot and more self-efficacy in robot operation. These findings suggest that the effectiveness of VR-based training on students may not necessarily associate with its effectiveness on construction workers.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lucas_customizing_2023,
  title     = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
  author    = {Gale M. Lucas and Johnathan Mell and Jill Boberg and Forrest Zenone and Ewart J. Visser and Chad Tossell and Todd Seech},
  url       = {https://www.nature.com/articles/s41598-022-27154-2},
  doi       = {10.1038/s41598-022-27154-2},
  issn      = {2045-2322},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  journal   = {Sci Rep},
  volume    = {13},
  number    = {1},
  pages     = {78},
  publisher = {Nature Publishing Group},
  abstract  = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Lei, Su; Gratch, Jonathan
Emotional Expressivity is a Reliable Signal of Surprise Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lei_emotional_2023,
  title     = {Emotional Expressivity is a Reliable Signal of Surprise},
  author    = {Su Lei and Jonathan Gratch},
  doi       = {10.1109/TAFFC.2023.3234015},
  issn      = {1949-3045},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {IEEE Transactions on Affective Computing},
  pages     = {1--12},
  abstract  = {We consider the problem of inferring what happened to a person in a social task from momentary facial reactions. To approach this, we introduce several innovations. First, rather than predicting what (observers think) someone feels, we predict objective features of the event that immediately preceded the facial reactions. Second, we draw on appraisal theory, a key psychological theory of emotion, to characterize features of this immediately-preceded event. Specifically, we explore if facial expressions reveal if the event is expected, goal-congruent, and norm-compatible. Finally, we argue that emotional expressivity serves as a better feature for characterizing momentary expressions than traditional facial features. Specifically, we use supervised machine learning to predict third-party judgments of emotional expressivity with high accuracy, and show this model improves inferences about the nature of the event that preceded an emotional reaction. Contrary to common sense, “genuine smiles” failed to predict if an event advanced a person's goals. Rather, expressions best revealed if an event violated expectations. We discussed the implications of these findings for the interpretation of facial displays and potential limitations that could impact the generality of these findings.},
  note      = {Conference Name: IEEE Transactions on Affective Computing},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale M.; Gratch, Jonathan
Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chawla_towards_2023,
  title     = {Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues},
  author    = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale M. Lucas and Jonathan Gratch},
  url       = {https://ieeexplore.ieee.org/abstract/document/10021626},
  doi       = {10.1109/TAFFC.2023.3238007},
  issn      = {1949-3045},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {IEEE Transactions on Affective Computing},
  pages     = {1--12},
  abstract  = {Negotiation is a complex social interaction that encapsulates emotional encounters in human decision-making. Virtual agents that can negotiate with humans by the means of language are useful in pedagogy and conversational AI. To advance the development of such agents, we explore the role of emotion in the prediction of two important subjective goals in a negotiation – outcome satisfaction and partner perception. We devise ways to measure and compare different degrees of emotion expression in negotiation dialogues, consisting of emoticon, lexical, and contextual variables. Through an extensive analysis of a large-scale dataset in chat-based negotiations, we find that incorporating emotion expression explains significantly more variance, above and beyond the demographics and personality traits of the participants. Further, our temporal analysis reveals that emotive information from both early and later stages of the negotiation contributes to this prediction, indicating the need for a continual learning model of capturing emotion for automated agents. Finally, we extend our analysis to another dataset, showing promise that our findings generalize to more complex scenarios. We conclude by discussing our insights, which will be helpful for designing adaptive negotiation agents that interact through realistic communication interfaces.},
  note      = {Conference Name: IEEE Transactions on Affective Computing},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Vlake, Johan H.; Bommel, Jasper; Riva, Giuseppe; Wiederhold, Brenda K.; Cipresso, Pietro; Rizzo, Albert Skip; Botella, Cristina; Hooft, Lotty; Bienvenu, O. Joseph; Geerts, Bart; Wils, Evert-Jan; Gommers, Diederik; Genderen, Michel E.
Reporting the early stage clinical evaluation of virtual-reality-based intervention trials: RATE-VR Journal Article
In: Nat Med, vol. 29, no. 1, pp. 12–13, 2023, ISSN: 1546-170X, (Number: 1 Publisher: Nature Publishing Group).
Links | BibTeX | Tags: MedVR, UARC
@article{vlake_reporting_2023,
  title     = {Reporting the early stage clinical evaluation of virtual-reality-based intervention trials: RATE-VR},
  author    = {Johan H. Vlake and Jasper Bommel and Giuseppe Riva and Brenda K. Wiederhold and Pietro Cipresso and Albert Skip Rizzo and Cristina Botella and Lotty Hooft and O. Joseph Bienvenu and Bart Geerts and Evert-Jan Wils and Diederik Gommers and Michel E. Genderen},
  url       = {https://www.nature.com/articles/s41591-022-02085-7},
  doi       = {10.1038/s41591-022-02085-7},
  issn      = {1546-170X},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  journal   = {Nat Med},
  volume    = {29},
  number    = {1},
  pages     = {12--13},
  publisher = {Nature Publishing Group},
  keywords  = {MedVR, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
  title     = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
  author    = {Arno Hartholt and Sharon Mozgai},
  url       = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
  doi       = {10.54941/ahfe1002856},
  isbn      = {978-1-958651-45-2},
  issn      = {27710718},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
  volume    = {69},
  publisher = {AHFE Open Access},
  abstract  = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration \& Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
  note      = {Issue: 69},
  keywords  = {DTIC, UARC, VHTL, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2023,
  title     = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
  author    = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
  url       = {https://ieeexplore.ieee.org/abstract/document/10042532},
  doi       = {10.1109/FG57933.2023.10042532},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
  pages     = {1--6},
  abstract  = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
  keywords  = {UARC, VHTL, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: STG, UARC
@article{yu_transupr_2023,
  title     = {{TransUPR}: A Transformer-based Uncertain Point Refiner for {LiDAR} Point Cloud Semantic Segmentation},
  author    = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
  url       = {https://arxiv.org/abs/2302.08594},
  doi       = {10.48550/ARXIV.2302.08594},
  eprint    = {2302.08594},
  eprinttype = {arXiv},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-08-24},
  abstract  = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2\% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6\% on the mIoU.},
  note      = {Version Number: 2},
  keywords  = {STG, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. Pages 2334–2336, 2023.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_effectiveness_2023,
  title     = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a {Minecraft} Search-and-Rescue Task},
  author    = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
  url       = {https://dl.acm.org/doi/10.5555/3545946.3598925},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
  pages     = {2334--2336},
  abstract  = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
  keywords  = {Social Simulation, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_can_2023,
  title     = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
  author    = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
  editor    = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
  url       = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
  doi       = {10.1007/978-3-031-36272-9_16},
  isbn      = {978-3-031-36271-2 978-3-031-36272-9},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-08-23},
  booktitle = {Artificial Intelligence in Education},
  series    = {Lecture Notes in Computer Science},
  volume    = {13916},
  pages     = {189--201},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}