Publications
Search
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Hudson, Taylor; Artstein, Ron; Voss, Clare; Traum, David
Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release Miscellaneous
2023, (arXiv:2310.17568 [cs]).
@misc{lukin_navigating_2023,
title = {Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Taylor Hudson and Ron Artstein and Clare Voss and David Traum},
url = {http://arxiv.org/abs/2310.17568},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
eprint = {2310.17568},
archiveprefix = {arXiv},
primaryclass = {cs},
abstract = {Human-guided robotic exploration is a useful approach to gathering information at remote locations, especially those that might be too risky, inhospitable, or inaccessible for humans. Maintaining common ground between the remotely-located partners is a challenge, one that can be facilitated by multi-modal communication. In this paper, we explore how participants utilized multiple modalities to investigate a remote location with the help of a robotic partner. Participants issued spoken natural language instructions and received from the robot: text-based feedback, continuous 2D LIDAR mapping, and upon-request static photographs. We noticed that different strategies were adopted in terms of use of the modalities, and hypothesize that these differences may be correlated with success at several exploration sub-tasks. We found that requesting photos may have improved the identification and counting of some key entities (doorways in particular) and that this strategy did not hinder the amount of overall area exploration. Future work with larger samples may reveal the effects of more nuanced photo and dialogue strategies, which can inform the training of robotic agents. Additionally, we announce the release of our unique multi-modal corpus of human-robot communication in an exploration context: SCOUT, the Situated Corpus on Understanding Transactions.},
note = {arXiv:2310.17568 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Gilani, Setareh Nasihati; Pollard, Kimberly; Traum, David
Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions Proceedings Article
In: International Conference on Multimodal Interaction, pp. 71–75, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{nasihati_gilani_multimodal_2023,
title = {Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions},
author = {Setareh Nasihati Gilani and Kimberly Pollard and David Traum},
url = {https://dl.acm.org/doi/10.1145/3610661.3617166},
doi = {10.1145/3610661.3617166},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {71--75},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
@article{awada_predicting_2023,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
  title     = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
  journal   = {Sensors},
  volume    = {23},
  number    = {21},
  pages     = {8694},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  doi       = {10.3390/s23218694},
  issn      = {1424-8220},
  url       = {https://www.mdpi.com/1424-8220/23/21/8694},
  abstract  = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
eprint = {2310.14404},
archiveprefix = {arXiv},
primaryclass = {cs},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
@techreport{prinzing_can_2023,
  author      = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
  title       = {Can AI Agents Help Humans to Connect?},
  institution = {PsyArXiv},
  year        = {2023},
  date        = {2023-10-01},
  urldate     = {2023-12-07},
  doi         = {10.31234/osf.io/muq6s},
  url         = {https://osf.io/muq6s},
  abstract    = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington, DC, USA, 2023, ISBN: 978-1-4503-9926-5.
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545--550},
publisher = {ACM},
address = {Washington, DC, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
eprint = {2310.17194},
archiveprefix = {arXiv},
primaryclass = {eess},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{ahmed_asar_2023,
title = {ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11--15},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105--106},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: International Conference on Multimodal Interaction, pp. 406–415, ACM, Paris, France, 2023, ISBN: 9798400700552.
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {9798400700552},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406--415},
publisher = {ACM},
address = {Paris, France},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 0360-1323.
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1--2},
publisher = {ACM},
address = {W{\"u}rzburg, Germany},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yin, Yufeng; Chang, Di; Song, Guoxian; Sang, Shen; Zhi, Tiancheng; Liu, Jing; Luo, Linjie; Soleymani, Mohammad
FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features Miscellaneous
2023, (arXiv:2308.12380 [cs]).
@misc{yin_fg-net_2023,
title = {FG-Net: Facial Action Unit Detection with Generalizable Pyramidal Features},
author = {Yufeng Yin and Di Chang and Guoxian Song and Shen Sang and Tiancheng Zhi and Jing Liu and Linjie Luo and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.12380},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
eprint = {2308.12380},
archiveprefix = {arXiv},
primaryclass = {cs},
abstract = {Automatic detection of facial Action Units (AUs) allows for objective facial expression analysis. Due to the high cost of AU labeling and the limited size of existing benchmarks, previous AU detection methods tend to overfit the dataset, resulting in a significant performance loss when evaluated across corpora. To address this problem, we propose FG-Net for generalizable facial action unit detection. Specifically, FG-Net extracts feature maps from a StyleGAN2 model pre-trained on a large and diverse face image dataset. Then, these features are used to detect AUs with a Pyramid CNN Interpreter, making the training efficient and capturing essential local features. The proposed FG-Net achieves a strong generalization ability for heatmap-based AU detection thanks to the generalizable and semantic-rich features extracted from the pre-trained generative model. Extensive experiments are conducted to evaluate within- and cross-corpus AU detection with the widely-used DISFA and BP4D datasets. Compared with the state-of-the-art, the proposed method achieves superior cross-domain performance while maintaining competitive within-domain performance. In addition, FG-Net is data-efficient and achieves competitive performance even when trained on 1000 samples. Our code will be released at https://github.com/ihp-lab/FG-Net},
note = {arXiv:2308.12380 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Chang, Di; Yin, Yufeng; Li, Zongjian; Tran, Minh; Soleymani, Mohammad
LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis Miscellaneous
2023, (arXiv:2308.10713 [cs]).
@misc{chang_libreface_2023,
title = {LibreFace: An Open-Source Toolkit for Deep Facial Expression Analysis},
author = {Di Chang and Yufeng Yin and Zongjian Li and Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2308.10713},
year = {2023},
date = {2023-08-01},
urldate = {2024-02-21},
publisher = {arXiv},
eprint = {2308.10713},
archiveprefix = {arXiv},
primaryclass = {cs},
abstract = {Facial expression analysis is an important tool for human-computer interaction. In this paper, we introduce LibreFace, an open-source toolkit for facial expression analysis. This open-source toolbox offers real-time and offline analysis of facial behavior through deep learning models, including facial action unit (AU) detection, AU intensity estimation, and facial expression recognition. To accomplish this, we employ several techniques, including the utilization of a large-scale pre-trained network, feature-wise knowledge distillation, and task-specific fine-tuning. These approaches are designed to effectively and accurately analyze facial expressions by leveraging visual information, thereby facilitating the implementation of real-time interactive applications. In terms of Action Unit (AU) intensity estimation, we achieve a Pearson Correlation Coefficient (PCC) of 0.63 on DISFA, which is 7% higher than the performance of OpenFace 2.0 while maintaining highly-efficient inference that runs two times faster than OpenFace 2.0. Despite being compact, our model also demonstrates competitive performance to state-of-the-art facial expression analysis methods on AffecNet, FFHQ, and RAF-DB. Our code will be released at https://github.com/ihp-lab/LibreFace},
note = {arXiv:2308.10713 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Kappas, Arvid; Gratch, Jonathan
These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI Journal Article
In: Affective Science, 2023, ISSN: 2662-2041, 2662-205X.
@article{kappas_these_2023,
title = {These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI},
author = {Arvid Kappas and Jonathan Gratch},
url = {https://link.springer.com/10.1007/s42761-023-00211-3},
doi = {10.1007/s42761-023-00211-3},
issn = {2662-2041, 2662-205X},
year = {2023},
date = {2023-08-01},
urldate = {2023-09-20},
journal = {Affective Science},
abstract = {AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 09257535.
@article{liu_effectiveness_2023,
title = {Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
doi = {10.1016/j.ssci.2023.106175},
issn = {0925-7535},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-22},
journal = {Safety Science},
volume = {164},
pages = {106175},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636--640},
publisher = {ISCA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Okado, Yuko; Shiel, Aaron; Carr, Kayla; Rosenberg, Milton; Rice, Enora; Ostrander, Luke; Ju, Megan; Gutierrez, Cassandra; Ramirez, Dilan; Auerbach, Daniel; Aguirre, Angelica; Swartout, William
MentorStudio: Amplifying diverse voices through rapid, self-authorable virtual mentors Journal Article
In: 2023, (Publisher: Zenodo).
@article{nye_mentorstudio_2023,
  author    = {Benjamin D. Nye and Yuko Okado and Aaron Shiel and Kayla Carr and Milton Rosenberg and Enora Rice and Luke Ostrander and Megan Ju and Cassandra Gutierrez and Dilan Ramirez and Daniel Auerbach and Angelica Aguirre and William Swartout},
  title     = {MentorStudio: Amplifying diverse voices through rapid, self-authorable virtual mentors},
  year      = {2023},
  date      = {2023-07-01},
  urldate   = {2024-01-11},
  doi       = {10.5281/ZENODO.8226275},
  url       = {https://zenodo.org/record/8226275},
  abstract  = {Mentoring promotes underserved students' STEM persistence but it is difficult to scale up. Virtual agents can amplify mentors' experiences to larger audiences, which is particularly important for mentors from under-represented backgrounds and for underserved students with less access to mentors. This paper introduces MentorStudio, an online platform that allows real-life mentors to self-record and publish video-based conversational virtual agents. MentorStudio's goals are to increase speed, scheduling flexibility, and autonomy in creating intelligent virtual mentors. MentorStudio platform components are introduced, along with initial feedback regarding usability and acceptance collected from 20 STEM mentors who recorded virtual mentors. Overall, the MentorStudio platform has good ease-of-use and acceptance among mentors and offers a platform capable of recording large number of mentors to expand their reach to an unlimited number of students.},
  note      = {Publisher: Zenodo},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Saxon, Leslie; Boberg, Jill; Faulk, Robert; Barrett, Trevor
Identifying relationships between compression garments and recovery in a military training environment Technical Report
In Review 2023.
@techreport{saxon_identifying_2023,
title = {Identifying relationships between compression garments and recovery in a military training environment},
author = {Leslie Saxon and Jill Boberg and Robert Faulk and Trevor Barrett},
url = {https://www.researchsquare.com/article/rs-3193173/v1},
doi = {10.21203/rs.3.rs-3193173/v1},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-21},
institution = {In Review},
abstract = {Development and maintenance of physical capabilities is an essential part of combat readiness in the military. This readiness requires continuous training and is therefore compromised by injury. Because Service Members (SMs) must be physically and cognitively prepared to conduct multifaceted operations in support of strategic objectives, and because the Department of Defense’s (DoD) non-deployable rate and annual costs associated with treating SMs continue to rise at an alarming rate, finding a far-reaching and efficient solution to prevent such injuries is a high priority. Compression garments (CGs) have become increasingly popular over the past decade in human performance applications, and reportedly facilitate post-exercise recovery by reducing muscle soreness, increasing blood lactate removal, and increasing perception of recovery, but the evidence is mixed, at best. In the current study we explored whether CG use, and duration of use, improves recovery and mitigates muscle soreness effectively in an elite Marine training course. In order to test this, we subjected Service Members to fatiguing exercise and then measured subjective and objective recovery and soreness using participant reports and grip and leg strength over a 72-hour recovery period. Findings from this study suggest that wearing CGs for post training recovery showed significant and moderate positive effects on subjective soreness, fatigue, and perceived level of recovery. We did not find statistically significant effects on physical performance while testing grip or leg strength. These findings suggest that CG may be a beneficial strategy for military training environments to accelerate muscle recovery after high-intensity exercise, without adverse effects to the wearer or negative impact on military training.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Development and maintenance of physical capabilities is an essential part of combat readiness in the military. This readiness requires continuous training and is therefore compromised by injury. Because Service Members (SMs) must be physically and cognitively prepared to conduct multifaceted operations in support of strategic objectives, and because the Department of Defense’s (DoD) non-deployable rate and annual costs associated with treating SMs continue to rise at an alarming rate, finding a far-reaching and efficient solution to prevent such injuries is a high priority. Compression garments (CGs) have become increasingly popular over the past decade in human performance applications, and reportedly facilitate post-exercise recovery by reducing muscle soreness, increasing blood lactate removal, and increasing perception of recovery, but the evidence is mixed, at best. In the current study we explored whether CG use, and duration of use, improves recovery and mitigates muscle soreness effectively in an elite Marine training course. In order to test this, we subjected Service Members to fatiguing exercise and then measured subjective and objective recovery and soreness using participant reports and grip and leg strength over a 72-hour recovery period. Findings from this study suggest that wearing CGs for post training recovery showed significant and moderate positive effects on subjective soreness, fatigue, and perceived level of recovery. We did not find statistically significant effects on physical performance while testing grip or leg strength. These findings suggest that CG may be a beneficial strategy for military training environments to accelerate muscle recovery after high-intensity exercise, without adverse effects to the wearer or negative impact on military training.
Filter
2023
Murawski, Alaine; Ramirez-Zohfeld, Vanessa; Schierer, Allison; Olvera, Charles; Mell, Johnathan; Gratch, Jonathan; Brett, Jeanne; Lindquist, Lee A.
Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers Journal Article
In: Geriatrics, vol. 8, no. 2, pp. 36, 2023, ISSN: 2308-3417, (Number: 2 Publisher: Multidisciplinary Digital Publishing Institute).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{murawski_transforming_2023,
  author    = {Alaine Murawski and Vanessa Ramirez-Zohfeld and Allison Schierer and Charles Olvera and Johnathan Mell and Jonathan Gratch and Jeanne Brett and Lee A. Lindquist},
  title     = {Transforming a Negotiation Framework to Resolve Conflicts among Older Adults and Family Caregivers},
  journal   = {Geriatrics},
  volume    = {8},
  number    = {2},
  pages     = {36},
  year      = {2023},
  date      = {2023-04-01},
  urldate   = {2023-03-31},
  issn      = {2308-3417},
  doi       = {10.3390/geriatrics8020036},
  url       = {https://www.mdpi.com/2308-3417/8/2/36},
  abstract  = {Background: Family caregivers of older people with Alzheimer’s dementia (PWD) often need to advocate and resolve health-related conflicts (e.g., determining treatment necessity, billing errors, and home health extensions). As they deal with these health system conflicts, family caregivers experience unnecessary frustration, anxiety, and stress. The goal of this research was to apply a negotiation framework to resolve real-world family caregiver–older adult conflicts. Methods: We convened an interdisciplinary team of national community-based family caregivers, social workers, geriatricians, and negotiation experts (n = 9; Illinois, Florida, New York, and California) to examine the applicability of negotiation and conflict management frameworks to three older adult–caregiver conflicts (i.e., caregiver–older adult, caregiver–provider, and caregiver–caregiver). The panel of caregivers provided scenarios and dialogue describing conflicts they experienced in these three settings. A qualitative analysis was then performed grouping the responses into a framework matrix. Results: Upon presenting the three conflicts to the caregivers, 96 responses (caregiver–senior), 75 responses (caregiver–caregiver), and 80 responses (caregiver–provider) were generated. A thematic analysis showed that the statements and responses fit the interest–rights–power (IRP) negotiation framework. Discussion: The interests–rights–power (IRP) framework, used in business negotiations, provided insight into how caregivers experienced conflict with older adults, providers, and other caregivers. Future research is needed to examine applying the IRP framework in the training of caregivers of older people with Alzheimer’s dementia.},
  note      = {Number: 2
Publisher: Multidisciplinary Digital Publishing Institute},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gordon, Andrew S.; Feng, Andrew
Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction Proceedings Article
In: 2023 57th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Baltimore, MD, USA, 2023, ISBN: 978-1-66545-181-9.
Links | BibTeX | Tags: Narrative, UARC
@inproceedings{gordon_searching_2023,
title = {Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction},
author = {Andrew S. Gordon and Andrew Feng},
url = {https://ieeexplore.ieee.org/document/10089729/},
doi = {10.1109/CISS56502.2023.10089729},
isbn = {978-1-66545-181-9},
year = {2023},
date = {2023-03-01},
urldate = {2023-08-07},
booktitle = {2023 57th Annual Conference on Information Sciences and Systems (CISS)},
pages = {1--6},
publisher = {IEEE},
address = {Baltimore, MD, USA},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Hsu, Wan-Yu; Anguera, Joaquin A.; Rizzo, Albert; Campusano, Richard; Chiaravalloti, Nancy D.; DeLuca, John; Gazzaley, Adam; Bove, Riley M.
A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study Journal Article
In: Frontiers in Human Neuroscience, 2023, (Place: Lausanne, Switzerland Publisher: Frontiers Research Foundation Section: ORIGINAL RESEARCH article).
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{hsu_virtual_2023,
  author    = {Wan-Yu Hsu and Joaquin A. Anguera and Albert Rizzo and Richard Campusano and Nancy D. Chiaravalloti and John DeLuca and Adam Gazzaley and Riley M. Bove},
  title     = {A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study},
  journal   = {Frontiers in Human Neuroscience},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  doi       = {10.3389/fnhum.2023.1139316},
  url       = {https://www.proquest.com/docview/2787027204/abstract/BEA88F7BB72B4623PQ/1},
  abstract  = {Introduction: Cognitive impairment is a debilitating symptom in people with multiple sclerosis (MS). Most of the neuropsychological tasks have little resemblance to everyday life. There is a need for ecologically valid tools for assessing cognition in real-life functional contexts in MS. One potential solution would involve the use of virtual reality (VR) to exert finer control over the task presentation environment; however, VR studies in the MS population are scarce. Objectives: To explore the utility and feasibility of a VR program for cognitive assessment in MS. Methods: A VR classroom embedded with a continuous performance task (CPT) was assessed in 10 non-MS adults and 10 people with MS with low cognitive functioning. Participants performed the CPT with distractors (ie. WD) and without distractors (ie. ND). The Symbol Digit Modalities Test (SDMT), California Verbal Learning Test – II (CVLT-II), and a feedback survey on the VR program were administered. Results: People with MS exhibited greater reaction time variability (RTV) compared to non-MS participants, and greater RTV in both WD and ND conditions was associated with lower SDMT. Conclusions: VR tools warrant further research to determine their value as an ecologically valid platform for assessing cognition and everyday functioning in people with MS.},
  note      = {Place: Lausanne, Switzerland
Publisher: Frontiers Research Foundation
Section: ORIGINAL RESEARCH article},
  keywords  = {MedVR, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Pynadath, David V.; Dilkina, Bistra; Jeong, David C.; John, Richard S.; Marsella, Stacy C.; Merchant, Chirag; Miller, Lynn C.; Read, Stephen J.
Disaster world Journal Article
In: Comput Math Organ Theory, vol. 29, no. 1, pp. 84–117, 2023, ISSN: 1572-9346.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@article{pynadath_disaster_2023,
title = {Disaster world},
author = {David V. Pynadath and Bistra Dilkina and David C. Jeong and Richard S. John and Stacy C. Marsella and Chirag Merchant and Lynn C. Miller and Stephen J. Read},
url = {https://doi.org/10.1007/s10588-022-09359-y},
doi = {10.1007/s10588-022-09359-y},
issn = {1572-9346},
year = {2023},
date = {2023-03-01},
urldate = {2023-03-31},
journal = {Comput Math Organ Theory},
volume = {29},
number = {1},
pages = {84--117},
abstract = {Artificial intelligence (AI) research provides a rich source of modeling languages capable of generating socially plausible simulations of human behavior, while also providing a transparent ground truth that can support validation of social-science methods applied to that simulation. In this work, we leverage two established AI representations: decision-theoretic planning and recursive modeling. Decision-theoretic planning (specifically Partially Observable Markov Decision Processes) provides agents with quantitative models of their corresponding real-world entities’ subjective (and possibly incorrect) perspectives of ground truth in the form of probabilistic beliefs and utility functions. Recursive modeling gives an agent a theory of mind, which is necessary when a person’s (again, possibly incorrect) subjective perspectives are of another person, rather than of just his/her environment. We used PsychSim, a multiagent social-simulation framework combining these two AI frameworks, to build a general parameterized model of human behavior during disaster response, grounding the model in social-psychological theories to ensure social plausibility. We then instantiated that model into alternate ground truths for simulating population response to a series of natural disasters, namely, hurricanes. The simulations generate data in response to socially plausible instruments (e.g., surveys) that serve as input to the Ground Truth program’s designated research teams for them to conduct simulated social science. The simulation also provides a graphical ground truth and a set of outcomes to be used as the gold standard in evaluating the research teams’ inferences.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective Journal Article
In: Philosophical Transactions of the Royal Society B: Biological Sciences, vol. 378, no. 1875, pp. 20210475, 2023, (Publisher: Royal Society).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{gratch_promise_2023,
  author    = {Jonathan Gratch},
  title     = {The promise and peril of interactive embodied agents for studying non-verbal communication: a machine learning perspective},
  journal   = {Philosophical Transactions of the Royal Society B: Biological Sciences},
  volume    = {378},
  number    = {1875},
  pages     = {20210475},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  doi       = {10.1098/rstb.2021.0475},
  url       = {https://royalsocietypublishing.org/doi/abs/10.1098/rstb.2021.0475},
  abstract  = {In face-to-face interactions, parties rapidly react and adapt to each other's words, movements and expressions. Any science of face-to-face interaction must develop approaches to hypothesize and rigorously test mechanisms that explain such interdependent behaviour. Yet conventional experimental designs often sacrifice interactivity to establish experimental control. Interactive virtual and robotic agents have been offered as a way to study true interactivity while enforcing a measure of experimental control by allowing participants to interact with realistic but carefully controlled partners. But as researchers increasingly turn to machine learning to add realism to such agents, they may unintentionally distort the very interactivity they seek to illuminate, particularly when investigating the role of non-verbal signals such as emotion or active-listening behaviours. Here I discuss some of the methodological challenges that may arise when machine learning is used to model the behaviour of interaction partners. By articulating and explicitly considering these commitments, researchers can transform ‘unintentional distortions’ into valuable methodological tools that yield new insights and better contextualize existing experimental findings that rely on learning technology.
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.},
  note      = {Publisher: Royal Society},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
This article is part of a discussion meeting issue ‘Face2face: advancing the science of social interaction’.
Awada, Mohamad; Becerik-Gerber, Burcin; Liu, Ruying; Seyedrezaei, Mirmahdi; Lu, Zheng; Xenakis, Matheos; Lucas, Gale; Roll, Shawn C.; Narayanan, Shrikanth
Ten questions concerning the impact of environmental stress on office workers Journal Article
In: Building and Environment, vol. 229, pp. 109964, 2023, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{awada_ten_2023,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Ruying Liu and Mirmahdi Seyedrezaei and Zheng Lu and Matheos Xenakis and Gale Lucas and Shawn C. Roll and Shrikanth Narayanan},
  title     = {Ten questions concerning the impact of environmental stress on office workers},
  journal   = {Building and Environment},
  volume    = {229},
  pages     = {109964},
  year      = {2023},
  date      = {2023-02-01},
  urldate   = {2023-03-31},
  issn      = {0360-1323},
  doi       = {10.1016/j.buildenv.2022.109964},
  url       = {https://www.sciencedirect.com/science/article/pii/S0360132322011945},
  abstract  = {We regularly face stress during our everyday activities, to the extent that stress is recognized by the World Health Organization as the epidemic of the 21st century. Stress is how humans respond physically and psychologically to adjustments, experiences, conditions, and circumstances in their lives. While there are many reasons for stress, work and job pressure remain the main cause. Thus, companies are increasingly interested in creating healthier, more comfortable, and stress-free offices for their workers. The indoor environment can induce environmental stress when it cannot satisfy the individual needs for health and comfort. In fact, office environmental conditions (e.g., thermal, and indoor air conditions, lighting, and noise) and interior design parameters (e.g., office layout, colors, furniture, access to views, distance to window, personal control and biophilic design) have been found to affect office workers' stress levels. A line of research based on the stress recovery theory offers new insights for establishing offices that limit environmental stress and help with work stress recovery. To that end, this paper answers ten questions that explore the relation between the indoor office-built environment and stress levels among workers. The answers to the ten questions are based on an extensive literature review to draw conclusions from what has been achieved to date. Thus, this study presents a foundation for future environmental stress related research in offices.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.)
Artificial intelligence in education: 24th international conference, AIED 2023, Tokyo, Japan, July 3-7, 2023: proceedings Book
Springer, Cham, 2023, ISBN: 978-3-031-36271-2, (Meeting Name: International Conference on Artificial Intelligence in Education).
@book{wang_artificial_2023,
title = {Artificial intelligence in education: 24th international conference, AIED 2023, Tokyo, Japan, July 3-7, 2023: proceedings},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
isbn = {978-3-031-36271-2},
year = {2023},
date = {2023-01-01},
number = {13916},
publisher = {Springer},
address = {Cham},
series = {Lecture notes in computer science Lecture notes in artificial intelligence},
abstract = {This book constitutes the refereed proceedings of the 24th International Conference on Artificial Intelligence in Education, AIED 2023, held in Tokyo, Japan, during July 3-7, 2023. This event took place in hybrid mode. The 53 full papers and 26 short papers presented in this book were carefully reviewed and selected from 311 submissions. The papers present result in high-quality research on intelligent systems and the cognitive sciences for the improvement and advancement of education. The conference was hosted by the prestigious International Artificial Intelligence in Education Society, a global association of researchers and academics specializing in the many fields that comprise AIED, including, but not limited to, computer science, learning sciences, and education},
note = {Meeting Name: International Conference on Artificial Intelligence in Education},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Goel, Rahul; Tse, Teresa; Smith, Lia J.; Floren, Andrew; Naylor, Bruce; Williams, M. Wright; Salas, Ramiro; Rizzo, Albert S.; Ress, David
Framework for Accurate Classification of Self-Reported Stress From Multisession Functional MRI Data of Veterans With Posttraumatic Stress Journal Article
In: Chronic Stress, vol. 7, pp. 24705470231203655, 2023, ISSN: 2470-5470.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{goel_framework_2023,
title = {Framework for Accurate Classification of Self-Reported Stress From Multisession Functional {MRI} Data of Veterans With Posttraumatic Stress},
author = {Rahul Goel and Teresa Tse and Lia J. Smith and Andrew Floren and Bruce Naylor and M. Wright Williams and Ramiro Salas and Albert S. Rizzo and David Ress},
url = {http://journals.sagepub.com/doi/10.1177/24705470231203655},
doi = {10.1177/24705470231203655},
issn = {2470-5470},
year = {2023},
date = {2023-01-01},
urldate = {2023-12-07},
journal = {Chronic Stress},
volume = {7},
pages = {24705470231203655},
abstract = {Background: Posttraumatic stress disorder (PTSD) is a significant burden among combat Veterans returning from the wars in Iraq and Afghanistan. While empirically supported treatments have demonstrated reductions in PTSD symptomatology, there remains a need to improve treatment effectiveness. Functional magnetic resonance imaging (fMRI) neurofeedback has emerged as a possible treatment to ameliorate PTSD symptom severity. Virtual reality (VR) approaches have also shown promise in increasing treatment compliance and outcomes. To facilitate fMRI neurofeedback-associated therapies, it would be advantageous to accurately classify internal brain stress levels while Veterans are exposed to trauma-associated VR imagery. Methods: Across 2 sessions, we used fMRI to collect neural responses to trauma-associated VR-like stimuli among male combat Veterans with PTSD symptoms (N = 8). Veterans reported their self-perceived stress level on a scale from 1 to 8 every 15 s throughout the fMRI sessions. In our proposed framework, we precisely sample the fMRI data on cortical gray matter, blurring the data along the gray-matter manifold to reduce noise and dimensionality while preserving maximum neural information. Then, we independently applied 3 machine learning (ML) algorithms to this fMRI data collected across 2 sessions, separately for each Veteran, to build individualized ML models that predicted their internal brain states (self-reported stress responses). Results: We accurately classified the 8-class self-reported stress responses with a mean (± standard error) root mean square error of 0.6 (± 0.1) across all Veterans using the best ML approach. Conclusions: The findings demonstrate the predictive ability of ML algorithms applied to whole-brain cortical fMRI data collected during individual Veteran sessions. 
The framework we have developed to preprocess whole-brain cortical fMRI data and train ML models across sessions would provide a valuable tool to enable individualized real-time fMRI neurofeedback during VR-like exposure therapy for PTSD.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Wang, Timothy S.; Gordon, Andrew S.
Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5 Book Section
In: Holloway-Attaway, Lissa; Murray, John T. (Ed.): Interactive Storytelling, vol. 14384, pp. 297–305, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-47657-0 978-3-031-47658-7, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: Narrative, UARC
@incollection{holloway-attaway_playing_2023,
title = {Playing Story Creation Games with Large Language Models: Experiments with {GPT-3.5}},
author = {Timothy S. Wang and Andrew S. Gordon},
editor = {Lissa Holloway-Attaway and John T. Murray},
url = {https://link.springer.com/10.1007/978-3-031-47658-7_28},
doi = {10.1007/978-3-031-47658-7_28},
isbn = {978-3-031-47657-0 978-3-031-47658-7},
year = {2023},
date = {2023-01-01},
urldate = {2023-12-07},
booktitle = {Interactive Storytelling},
volume = {14384},
pages = {297--305},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D; Mee, Dillon; Core, Mark G
Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns Proceedings Article
In: 2023.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@inproceedings{nye_generative_2023,
  author    = {Benjamin D Nye and Dillon Mee and Mark G Core},
  title     = {Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://ceur-ws.org/Vol-3487/paper4.pdf},
  abstract  = {After many years of relatively limited capabilities for generative language models, recent large language models (LLM’s) have demonstrated qualitatively better capabilities for understanding, synthesis, and inference on text. Due to the prominence of ChatGPT’s chat system, both the media and many educational developers have suggested using generative AI to directly tutor students. However, despite surface-level similarity between ChatGPT interactions and tutoring dialogs, generative AI has other strengths which may be substantially more relevant for intelligent tutoring (e.g., detecting misconceptions, improved language translation, content generation) and weaknesses that make it problematic for on-the-fly tutoring (e.g., hallucinations, lack of pedagogical training data). In this paper, we discuss how we are approaching generative LLM’s for tutoring dialogs, for problems such as multi- concept short answer grading and semi-supervised interactive content generation. This work shows interesting opportunities for prompt engineering approaches for short-answer classification, despite sometimes quirky behavior. The time savings for high-quality content generation for tutoring is not yet clear and further research is needed. The paper concludes with a consideration of longer-term equity and access in a world where essential capabilities require low-latency real-time connections to large, pay-peruse models. Risks and mitigating technologies for this kind of “AI digital divide” are discussed, including optimized / edge-computing LLM’s and using generative AI models as simulated students to train specialized tutoring models.},
  keywords  = {Learning Sciences, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn; Liu, Ruying
A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress Journal Article
In: IEEE Trans. Affective Comput., pp. 1–15, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: Machine Learning, UARC
@article{awada_new_2023,
title = {A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll and Ruying Liu},
url = {https://ieeexplore.ieee.org/document/10286408/},
doi = {10.1109/TAFFC.2023.3324910},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-12-07},
journal = {IEEE Trans. Affective Comput.},
pages = {1--15},
keywords = {Machine Learning, UARC},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis Book Section
In: Kurosu, Masaaki; Hashizume, Ayako (Ed.): Human-Computer Interaction, vol. 14013, pp. 407–418, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35601-8 978-3-031-35602-5, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, Virtual Humans
@incollection{kurosu_relationship_2023,
title = {The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu and Ayako Hashizume},
url = {https://link.springer.com/10.1007/978-3-031-35602-5_29},
doi = {10.1007/978-3-031-35602-5_29},
isbn = {978-3-031-35601-8 978-3-031-35602-5},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Human-Computer Interaction},
volume = {14013},
pages = {407--418},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{tak_is_2023,
title = {Is {GPT} a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
eprinttype = {arXiv},
eprint = {2307.13779},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
journal = {IEEE Trans. Affective Comput.},
pages = {1--14},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
volume = {1831},
pages = {530--535},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Melo, Celso M. De; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {Celso M. {De Melo} and Jonathan Gratch and Stacy Marsella and Catherine Pelachaud},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
journal = {Proc. IEEE},
pages = {1--16},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
eprinttype = {arXiv},
eprint = {2303.12822},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {14051},
pages = {232--248},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experience better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcome – which were the result of their own behavior – with the same agent. Our analyses suggest the need for augmentation of models so that they account for such biased perceptions as well as mechanisms so that agents can detect and even actively work to correct this and similar biases of users.},
note = {arXiv:2301.09011 [cs]},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@incollection{gurney_comparing_2023,
title = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-{AI} Interactions},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2302.01854},
doi = {10.1007/978-3-031-30933-5_12},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {13832},
pages = {175--197},
abstract = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
internal-note = {TODO(review): @incollection requires a booktitle; DOI prefix suggests a Springer LNCS volume (13832) — confirm the volume title and add},
note = {arXiv:2302.01854 [cs]},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Yang, Jing; Xiao, Hanyuan; Teng, Wenbin; Cai, Yunxuan; Zhao, Yajie
Light Sampling Field and BRDF Representation for Physically-based Neural Rendering Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: UARC, VGL
@article{yang_light_2023,
title = {Light Sampling Field and {BRDF} Representation for Physically-based Neural Rendering},
author = {Jing Yang and Hanyuan Xiao and Wenbin Teng and Yunxuan Cai and Yajie Zhao},
url = {https://arxiv.org/abs/2304.05472},
doi = {10.48550/ARXIV.2304.05472},
eprint = {2304.05472},
archiveprefix = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-22},
abstract = {Physically-based rendering (PBR) is key for immersive rendering effects used widely in the industry to showcase detailed realistic scenes from computer graphics assets. A well-known caveat is that producing the same is computationally heavy and relies on complex capture devices. Inspired by the success in quality and efficiency of recent volumetric neural rendering, we want to develop a physically-based neural shader to eliminate device dependency and significantly boost performance. However, no existing lighting and material models in the current neural rendering approaches can accurately represent the comprehensive lighting models and BRDFs properties required by the PBR process. Thus, this paper proposes a novel lighting representation that models direct and indirect light locally through a light sampling strategy in a learned light sampling field. We also propose BRDF models to separately represent surface/subsurface scattering details to enable complex objects such as translucent material (i.e., skin, jade). We then implement our proposed representations with an end-to-end physically-based neural face skin shader, which takes a standard face asset (i.e., geometry, albedo map, and normal map) and an HDRI for illumination as inputs and generates a photo-realistic rendering as output. Extensive experiments showcase the quality and efficiency of our PBR face skin shader, indicating the effectiveness of our proposed lighting and material representations.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {UARC, VGL},
pubstate = {published},
tppubtype = {article}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{georgila_considerations_2023,
title = {Considerations for Child Speech Synthesis for Dialogue Systems},
author = {Kallirroi Georgila},
url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
year = {2023},
date = {2023-01-01},
address = {Los Angeles, CA},
abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
internal-note = {TODO(review): @inproceedings requires a booktitle, which is missing; the URL filename suggests an AIAIC 2023 venue — confirm and add},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the {CareerFair.ai} Platform at an {American Hispanic-Serving Institution}},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
volume = {13916},
pages = {189--201},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_effectiveness_2023,
title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a {Minecraft} Search-and-Rescue Task},
author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
year = {2023},
date = {2023-01-01},
booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {2334--2336},
abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Koenig, Sebastian; Lange, Belinda
Clinical virtual reality: The state of the science. Book Section
In: Brown, Gregory G.; Crosson, Bruce; Haaland, Kathleen Y.; King, Tricia Z. (Ed.): APA handbook of neuropsychology, Volume 2: Neuroscience and neuromethods (Vol. 2)., pp. 473–491, American Psychological Association, Washington, 2023, ISBN: 978-1-4338-4001-2 978-1-4338-4002-9.
@incollection{brown_clinical_2023,
title = {Clinical virtual reality: The state of the science.},
author = {Albert Rizzo and Sebastian Koenig and Belinda Lange},
editor = {Gregory G. Brown and Bruce Crosson and Kathleen Y. Haaland and Tricia Z. King},
url = {http://content.apa.org/books/17303-023},
doi = {10.1037/0000308-023},
isbn = {978-1-4338-4001-2 978-1-4338-4002-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {APA handbook of neuropsychology, Volume 2: Neuroscience and neuromethods (Vol. 2).},
pages = {473--491},
publisher = {American Psychological Association},
address = {Washington},
keywords = {MedVR},
pubstate = {published},
tppubtype = {incollection}
}
Rosenbloom, Paul S.
Rethinking the Physical Symbol Systems Hypothesis Book Section
In: Hammer, Patrick; Alirezaie, Marjan; Strannegård, Claes (Ed.): Artificial General Intelligence, vol. 13921, pp. 207–216, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-33468-9 978-3-031-33469-6, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: CogArch, Cognitive Architecture
@incollection{hammer_rethinking_2023,
title = {Rethinking the Physical Symbol Systems Hypothesis},
author = {Paul S. Rosenbloom},
editor = {Patrick Hammer and Marjan Alirezaie and Claes Strannegård},
url = {https://link.springer.com/10.1007/978-3-031-33469-6_21},
doi = {10.1007/978-3-031-33469-6_21},
isbn = {978-3-031-33468-9 978-3-031-33469-6},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {Artificial General Intelligence},
volume = {13921},
pages = {207--216},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {CogArch, Cognitive Architecture},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Gurney, Nikolos
The Design of Transparency Communication for Human-Multirobot Teams Book Section
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, vol. 14051, pp. 311–321, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35893-7 978-3-031-35894-4, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, Virtual Humans
@incollection{degen_design_2023,
title = {The Design of Transparency Communication for Human-Multirobot Teams},
author = {Ning Wang and David V. Pynadath and Nikolos Gurney},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/10.1007/978-3-031-35894-4_23},
doi = {10.1007/978-3-031-35894-4_23},
isbn = {978-3-031-35893-7 978-3-031-35894-4},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {Artificial Intelligence in HCI},
volume = {14051},
pages = {311--321},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wu, Haochen; Sequeira, Pedro; Pynadath, David V.
Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: AI, Social Simulation
@article{wu_multiagent_2023,
title = {Multiagent Inverse Reinforcement Learning via {Theory of Mind} Reasoning},
author = {Haochen Wu and Pedro Sequeira and David V. Pynadath},
url = {https://arxiv.org/abs/2302.10238},
doi = {10.48550/ARXIV.2302.10238},
eprint = {2302.10238},
archiveprefix = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
abstract = {We approach the problem of understanding how people interact with each other in collaborative settings, especially when individuals know little about their teammates, via Multiagent Inverse Reinforcement Learning (MIRL), where the goal is to infer the reward functions guiding the behavior of each individual given trajectories of a team's behavior during some task. Unlike current MIRL approaches, we do not assume that team members know each other's goals a priori; rather, that they collaborate by adapting to the goals of others perceived by observing their behavior, all while jointly performing a task. To address this problem, we propose a novel approach to MIRL via Theory of Mind (MIRL-ToM). For each agent, we first use ToM reasoning to estimate a posterior distribution over baseline reward profiles given their demonstrated behavior. We then perform MIRL via decentralized equilibrium by employing single-agent Maximum Entropy IRL to infer a reward function for each agent, where we simulate the behavior of other teammates according to the time-varying distribution over profiles. We evaluate our approach in a simulated 2-player search-and-rescue operation where the goal of the agents, playing different roles, is to search for and evacuate victims in the environment. Our results show that the choice of baseline profiles is paramount to the recovery of the ground-truth rewards, and that MIRL-ToM is able to recover the rewards used by agents interacting both with known and unknown teammates.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {AI, Social Simulation},
pubstate = {published},
tppubtype = {article}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: STG, UARC
@article{yu_transupr_2023,
title = {{TransUPR}: A Transformer-based Uncertain Point Refiner for {LiDAR} Point Cloud Semantic Segmentation},
author = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
url = {https://arxiv.org/abs/2302.08594},
doi = {10.48550/ARXIV.2302.08594},
eprint = {2302.08594},
archiveprefix = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
abstract = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1--6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration \& Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718
Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Vlake, Johan H.; Bommel, Jasper; Riva, Giuseppe; Wiederhold, Brenda K.; Cipresso, Pietro; Rizzo, Albert Skip; Botella, Cristina; Hooft, Lotty; Bienvenu, O. Joseph; Geerts, Bart; Wils, Evert-Jan; Gommers, Diederik; Genderen, Michel E.
Reporting the early stage clinical evaluation of virtual-reality-based intervention trials: RATE-VR Journal Article
In: Nat Med, vol. 29, no. 1, pp. 12–13, 2023, ISSN: 1546-170X, (Number: 1 Publisher: Nature Publishing Group).
Links | BibTeX | Tags: MedVR, UARC
@article{vlake_reporting_2023,
title = {Reporting the early stage clinical evaluation of virtual-reality-based intervention trials: RATE-VR},
author = {Johan H. Vlake and Jasper Bommel and Giuseppe Riva and Brenda K. Wiederhold and Pietro Cipresso and Albert Skip Rizzo and Cristina Botella and Lotty Hooft and O. Joseph Bienvenu and Bart Geerts and Evert-Jan Wils and Diederik Gommers and Michel E. Genderen},
url = {https://www.nature.com/articles/s41591-022-02085-7},
doi = {10.1038/s41591-022-02085-7},
issn = {1546-170X},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Nat Med},
volume = {29},
number = {1},
pages = {12--13},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale M.; Gratch, Jonathan
Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chawla_towards_2023,
title = {Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale M. Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/abstract/document/10021626},
doi = {10.1109/TAFFC.2023.3238007},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {Negotiation is a complex social interaction that encapsulates emotional encounters in human decision-making. Virtual agents that can negotiate with humans by the means of language are useful in pedagogy and conversational AI. To advance the development of such agents, we explore the role of emotion in the prediction of two important subjective goals in a negotiation – outcome satisfaction and partner perception. We devise ways to measure and compare different degrees of emotion expression in negotiation dialogues, consisting of emoticon, lexical, and contextual variables. Through an extensive analysis of a large-scale dataset in chat-based negotiations, we find that incorporating emotion expression explains significantly more variance, above and beyond the demographics and personality traits of the participants. Further, our temporal analysis reveals that emotive information from both early and later stages of the negotiation contributes to this prediction, indicating the need for a continual learning model of capturing emotion for automated agents. Finally, we extend our analysis to another dataset, showing promise that our findings generalize to more complex scenarios. We conclude by discussing our insights, which will be helpful for designing adaptive negotiation agents that interact through realistic communication interfaces.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lei, Su; Gratch, Jonathan
Emotional Expressivity is a Reliable Signal of Surprise Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lei_emotional_2023,
title = {Emotional Expressivity is a Reliable Signal of Surprise},
author = {Su Lei and Jonathan Gratch},
doi = {10.1109/TAFFC.2023.3234015},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {We consider the problem of inferring what happened to a person in a social task from momentary facial reactions. To approach this, we introduce several innovations. First, rather than predicting what (observers think) someone feels, we predict objective features of the event that immediately preceded the facial reactions. Second, we draw on appraisal theory, a key psychological theory of emotion, to characterize features of this immediately-preceded event. Specifically, we explore if facial expressions reveal if the event is expected, goal-congruent, and norm-compatible. Finally, we argue that emotional expressivity serves as a better feature for characterizing momentary expressions than traditional facial features. Specifically, we use supervised machine learning to predict third-party judgments of emotional expressivity with high accuracy, and show this model improves inferences about the nature of the event that preceded an emotional reaction. Contrary to common sense, “genuine smiles” failed to predict if an event advanced a person's goals. Rather, expressions best revealed if an event violated expectations. We discussed the implications of these findings for the interpretation of facial displays and potential limitations that could impact the generality of these findings.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lucas_customizing_2023,
  title     = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
  author    = {Gale M. Lucas and Johnathan Mell and Jill Boberg and Forrest Zenone and Ewart J. Visser and Chad Tossell and Todd Seech},
  url       = {https://www.nature.com/articles/s41598-022-27154-2},
  doi       = {10.1038/s41598-022-27154-2},
  issn      = {2045-2322},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  journal   = {Sci Rep},
  volume    = {13},
  number    = {1},
  pages     = {78},
  abstract  = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
  note      = {Number: 1
Publisher: Nature Publishing Group},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Adami, Pooya; Singh, Rashmi; Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
In: Advanced Engineering Informatics, vol. 55, pp. 101837, 2023, ISSN: 1474-0346.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{adami_participants_2023,
title = {Participants matter: Effectiveness of {VR}-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students},
author = {Pooya Adami and Rashmi Singh and Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://www.sciencedirect.com/science/article/pii/S1474034622002956},
doi = {10.1016/j.aei.2022.101837},
issn = {1474-0346},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Advanced Engineering Informatics},
volume = {55},
pages = {101837},
abstract = {Virtual Reality (VR)-based training has gained attention from the scientific community in the Architecture, Engineering, and Construction (AEC) industry as a cost-effective and safe method that eliminates the safety risks that may impose on workers during the training compared to traditional training methods (e.g., in-person hands-on training, apprenticeship). Although researchers have developed VR-based training for construction workers, some have recruited students rather than workers to understand the effect of their VR-based training. However, students are different from construction workers in many ways, which can threaten the validity of such studies. Hence, research is needed to investigate the extent to which the findings of a VR-based training study are contingent on whether students or construction workers were used as the study sample. This paper strives to compare the effectiveness of VR-based training on university students’ and construction workers’ knowledge acquisition, trust in the robot, and robot operation self-efficacy in remote operation of a construction robot. Twenty-five construction workers and twenty-five graduate construction engineering students were recruited to complete a VR-based training for remote operating a demolition robot. We used quantitative analyses to answer our research questions. Our study shows that the results are dependent on the target sample in that students gained more knowledge, whereas construction workers gained more trust in the robot and more self-efficacy in robot operation. These findings suggest that the effectiveness of VR-based training on students may not necessarily associate with its effectiveness on construction workers.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2022
Harvey, Philip D.; Depp, Colin A.; Rizzo, Albert A.; Strauss, Gregory P.; Spelber, David; Carpenter, Linda L.; Kalin, Ned H.; Krystal, John H.; McDonald, William M.; Nemeroff, Charles B.; Rodriguez, Carolyn I.; Widge, Alik S.; Torous, John
Technology and Mental Health: State of the Art for Assessment and Treatment Journal Article
In: AJP, vol. 179, no. 12, pp. 897–914, 2022, ISSN: 0002-953X, 1535-7228.
Links | BibTeX | Tags: MedVR, UARC
@article{harvey_technology_2022,
title = {Technology and Mental Health: State of the Art for Assessment and Treatment},
author = {Philip D. Harvey and Colin A. Depp and Albert A. Rizzo and Gregory P. Strauss and David Spelber and Linda L. Carpenter and Ned H. Kalin and John H. Krystal and William M. McDonald and Charles B. Nemeroff and Carolyn I. Rodriguez and Alik S. Widge and John Torous},
url = {http://ajp.psychiatryonline.org/doi/10.1176/appi.ajp.21121254},
doi = {10.1176/appi.ajp.21121254},
issn = {0002-953X, 1535-7228},
year = {2022},
date = {2022-12-01},
urldate = {2023-08-22},
journal = {AJP},
volume = {179},
number = {12},
pages = {897--914},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Maihofer, Adam X.; Engchuan, Worrawat; Huguet, Guillaume; Klein, Marieke; MacDonald, Jeffrey R.; Shanta, Omar; Thiruvahindrapuram, Bhooma; Jean-louis, Martineau; Saci, Zohra; Jacquemont, Sebastien; Scherer, Stephen W.; Ketema, Elizabeth; Aiello, Allison E.; Amstadter, Ananda B.; Avdibegović, Esmina; Babic, Dragan; Baker, Dewleen G.; Bisson, Jonathan I.; Boks, Marco P.; Bolger, Elizabeth A.; Bryant, Richard A.; Bustamante, Angela C.; Caldas-de-Almeida, Jose Miguel; Cardoso, Graça; Deckert, Jurgen; Delahanty, Douglas L.; Domschke, Katharina; Dunlop, Boadie W.; Dzubur-Kulenovic, Alma; Evans, Alexandra; Feeny, Norah C.; Franz, Carol E.; Gautam, Aarti; Geuze, Elbert; Goci, Aferdita; Hammamieh, Rasha; Jakovljevic, Miro; Jett, Marti; Jones, Ian; Kaufman, Milissa L.; Kessler, Ronald C.; King, Anthony P.; Kremen, William S.; Lawford, Bruce R.; Lebois, Lauren A. M.; Lewis, Catrin; Liberzon, Israel; Linnstaedt, Sarah D.; Lugonja, Bozo; Luykx, Jurjen J.; Lyons, Michael J.; Mavissakalian, Matig R.; McLaughlin, Katie A.; McLean, Samuel A.; Mehta, Divya; Mellor, Rebecca; Morris, Charles Phillip; Muhie, Seid; Orcutt, Holly K.; Peverill, Matthew; Ratanatharathorn, Andrew; Risbrough, Victoria B.; Rizzo, Albert; Roberts, Andrea L.; Rothbaum, Alex O.; Rothbaum, Barbara O.; Roy-Byrne, Peter; Ruggiero, Kenneth J.; Rutten, Bart P. F.; Schijven, Dick; Seng, Julia S.; Sheerin, Christina M.; Sorenson, Michael A.; Teicher, Martin H.; Uddin, Monica; Ursano, Robert J.; Vinkers, Christiaan H.; Voisey, Joanne; Weber, Heike; Winternitz, Sherry; Xavier, Miguel; Yang, Ruoting; Young, Ross McD; Zoellner, Lori A.; Salem, Rany M.; Shaffer, Richard A.; Wu, Tianying; Ressler, Kerry J.; Stein, Murray B.; Koenen, Karestan C.; Sebat, Jonathan; Nievergelt, Caroline M.
Rare copy number variation in posttraumatic stress disorder Journal Article
In: Mol Psychiatry, vol. 27, no. 12, pp. 5062–5069, 2022, ISSN: 1476-5578, (Number: 12 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{maihofer_rare_2022,
title = {Rare copy number variation in posttraumatic stress disorder},
author = {Adam X. Maihofer and Worrawat Engchuan and Guillaume Huguet and Marieke Klein and Jeffrey R. MacDonald and Omar Shanta and Bhooma Thiruvahindrapuram and Martineau Jean-louis and Zohra Saci and Sebastien Jacquemont and Stephen W. Scherer and Elizabeth Ketema and Allison E. Aiello and Ananda B. Amstadter and Esmina Avdibegović and Dragan Babic and Dewleen G. Baker and Jonathan I. Bisson and Marco P. Boks and Elizabeth A. Bolger and Richard A. Bryant and Angela C. Bustamante and Jose Miguel Caldas-de-Almeida and Graça Cardoso and Jurgen Deckert and Douglas L. Delahanty and Katharina Domschke and Boadie W. Dunlop and Alma Dzubur-Kulenovic and Alexandra Evans and Norah C. Feeny and Carol E. Franz and Aarti Gautam and Elbert Geuze and Aferdita Goci and Rasha Hammamieh and Miro Jakovljevic and Marti Jett and Ian Jones and Milissa L. Kaufman and Ronald C. Kessler and Anthony P. King and William S. Kremen and Bruce R. Lawford and Lauren A. M. Lebois and Catrin Lewis and Israel Liberzon and Sarah D. Linnstaedt and Bozo Lugonja and Jurjen J. Luykx and Michael J. Lyons and Matig R. Mavissakalian and Katie A. McLaughlin and Samuel A. McLean and Divya Mehta and Rebecca Mellor and Charles Phillip Morris and Seid Muhie and Holly K. Orcutt and Matthew Peverill and Andrew Ratanatharathorn and Victoria B. Risbrough and Albert Rizzo and Andrea L. Roberts and Alex O. Rothbaum and Barbara O. Rothbaum and Peter Roy-Byrne and Kenneth J. Ruggiero and Bart P. F. Rutten and Dick Schijven and Julia S. Seng and Christina M. Sheerin and Michael A. Sorenson and Martin H. Teicher and Monica Uddin and Robert J. Ursano and Christiaan H. Vinkers and Joanne Voisey and Heike Weber and Sherry Winternitz and Miguel Xavier and Ruoting Yang and Ross McD Young and Lori A. Zoellner and Rany M. Salem and Richard A. Shaffer and Tianying Wu and Kerry J. Ressler and Murray B. Stein and Karestan C. Koenen and Jonathan Sebat and Caroline M. Nievergelt},
url = {https://www.nature.com/articles/s41380-022-01776-4},
doi = {10.1038/s41380-022-01776-4},
issn = {1476-5578},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Mol Psychiatry},
volume = {27},
number = {12},
pages = {5062--5069},
abstract = {Posttraumatic stress disorder (PTSD) is a heritable (h2 = 24–71\%) psychiatric illness. Copy number variation (CNV) is a form of rare genetic variation that has been implicated in the etiology of psychiatric disorders, but no large-scale investigation of CNV in PTSD has been performed. We present an association study of CNV burden and PTSD symptoms in a sample of 114,383 participants (13,036 cases and 101,347 controls) of European ancestry. CNVs were called using two calling algorithms and intersected to a consensus set. Quality control was performed to remove strong outlier samples. CNVs were examined for association with PTSD within each cohort using linear or logistic regression analysis adjusted for population structure and CNV quality metrics, then inverse variance weighted meta-analyzed across cohorts. We examined the genome-wide total span of CNVs, enrichment of CNVs within specified gene-sets, and CNVs overlapping individual genes and implicated neurodevelopmental regions. The total distance covered by deletions crossing over known neurodevelopmental CNV regions was significant (beta = 0.029},
note = {Number: 12
Publisher: Nature Publishing Group},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Becerik-Gerber, Burcin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Höelscher, Christoph; Jazizadeh, Farrokh; Khan, Azam; Langevin, Jared; Liu, Ruying; Marks, Frederick; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Roll, Shawn; Schaumann, Davide; Seyedrezaei, Mirmahdi; Taylor, John E.; Zhao, Jie; Zhu, Runhe
The field of human building interaction for convergent research and innovation for intelligent built environments Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 22092, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{becerik-gerber_field_2022,
title = {The field of human building interaction for convergent research and innovation for intelligent built environments},
author = {Burcin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Christoph Hölscher and Farrokh Jazizadeh and Azam Khan and Jared Langevin and Ruying Liu and Frederick Marks and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Shawn Roll and Davide Schaumann and Mirmahdi Seyedrezaei and John E. Taylor and Jie Zhao and Runhe Zhu},
url = {https://www.nature.com/articles/s41598-022-25047-y},
doi = {10.1038/s41598-022-25047-y},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {22092},
abstract = {Human-Building Interaction (HBI) is a convergent field that represents the growing complexities of the dynamic interplay between human experience and intelligence within built environments. This paper provides core definitions, research dimensions, and an overall vision for the future of HBI as developed through consensus among 25 interdisciplinary experts in a series of facilitated workshops. Three primary areas contribute to and require attention in HBI research: humans (human experiences, performance, and well-being), buildings (building design and operations), and technologies (sensing, inference, and awareness). Three critical interdisciplinary research domains intersect these areas: control systems and decision making, trust and collaboration, and modeling and simulation. Finally, at the core, it is vital for HBI research to center on and support equity, privacy, and sustainability. Compelling research questions are posed for each primary area, research domain, and core principle. State-of-the-art methods used in HBI studies are discussed, and examples of original research are offered to illustrate opportunities for the advancement of HBI research.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Becerik-Gerber, Burçin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah L; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Jazizadeh, Farrokh; Liu, Ruying; Zhu, Runhe; Marks, Frederick; Roll, Shawn; Seyedrezaei, Mirmahdi; Taylor, John E.; Höelscher, Christoph; Khan, Azam; Langevin, Jared; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Schaumann, Davide; Zhao, Jie
Ten questions concerning human-building interaction research for improving the quality of life Journal Article
In: Building and Environment, vol. 226, pp. 109681, 2022, ISSN: 0360-1323.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{becerik-gerber_ten_2022,
title = {Ten questions concerning human-building interaction research for improving the quality of life},
author = {Burçin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah L. Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Farrokh Jazizadeh and Ruying Liu and Runhe Zhu and Frederick Marks and Shawn Roll and Mirmahdi Seyedrezaei and John E. Taylor and Christoph Hölscher and Azam Khan and Jared Langevin and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Davide Schaumann and Jie Zhao},
url = {https://www.sciencedirect.com/science/article/pii/S0360132322009118},
doi = {10.1016/j.buildenv.2022.109681},
issn = {0360-1323},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Building and Environment},
volume = {226},
pages = {109681},
abstract = {This paper seeks to address ten questions that explore the burgeoning field of Human-Building Interaction (HBI), an interdisciplinary field that represents the next frontier in convergent research and innovation to enable the dynamic interplay of human and building interactional intelligence. The field of HBI builds on several existing efforts in historically separate research fields/communities and aims to understand how buildings affect human outcomes and experiences, as well as how humans interact with, adapt to, and affect the built environment and its systems, to support buildings that can learn, enable adaptation, and evolve at different scales to improve the quality-of-life of its users while optimizing resource usage and service availability. Questions were developed by a diverse group of researchers with backgrounds in design, engineering, computer science, social science, and health science. Answers to these questions draw conclusions from what has been achieved to date as reported in the available literature and establish a foundation for future HBI research. This paper aims to encourage interdisciplinary collaborations in HBI research to change the way people interact with and perceive technology within the context of buildings and inform the design, construction, and operation of next-generation, intelligent built environments. In doing so, HBI research can realize a myriad of benefits for human users, including improved productivity, health, cognition, convenience, and comfort, all of which are essential to societal well-being.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.; Landicho, Earl
The impact of security countermeasures on human behavior during active shooter incidents Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 929, 2022, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{zhu_impact_2022,
title = {The impact of security countermeasures on human behavior during active shooter incidents},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers and Earl Landicho},
url = {https://www.nature.com/articles/s41598-022-04922-8},
doi = {10.1038/s41598-022-04922-8},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-26},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {929},
abstract = {Active shooter incidents represent an increasing threat to American society, especially in commercial and educational buildings. In recent years, a wide variety of security countermeasures have been recommended by public and governmental agencies. Many of these countermeasures are aimed to increase building security, yet their impact on human behavior when an active shooter incident occurs remains underexplored. To fill this research gap, we conducted virtual experiments to evaluate the impact of countermeasures on human behavior during active shooter incidents. A total of 162 office workers and middle/high school teachers were recruited to respond to an active shooter incident in virtual office and school buildings with or without the implementation of multiple countermeasures. The experiment results showed countermeasures significantly influenced participants’ response time and decisions (e.g., run, hide, fight). Participants’ responses and perceptions of the active shooter incident were also contingent on their daily roles, as well as building and social contexts. Teachers had more concerns for occupants’ safety than office workers. Moreover, teachers had more positive perceptions of occupants in the school, whereas office workers had more positive perceptions of occupants in the office.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
Difede, JoAnn; Rothbaum, Barbara O.; Rizzo, Albert A.; Wyka, Katarzyna; Spielman, Lisa; Reist, Christopher; Roy, Michael J.; Jovanovic, Tanja; Norrholm, Seth D.; Cukor, Judith; Olden, Megan; Glatt, Charles E.; Lee, Francis S.
Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer Journal Article
In: Transl Psychiatry, vol. 12, no. 1, pp. 299, 2022, ISSN: 2158-3188.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, Virtual Humans
@article{difede_enhancing_2022,
title = {Enhancing exposure therapy for posttraumatic stress disorder ({PTSD}): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer},
author = {JoAnn Difede and Barbara O. Rothbaum and Albert A. Rizzo and Katarzyna Wyka and Lisa Spielman and Christopher Reist and Michael J. Roy and Tanja Jovanovic and Seth D. Norrholm and Judith Cukor and Megan Olden and Charles E. Glatt and Francis S. Lee},
url = {https://www.nature.com/articles/s41398-022-02066-x},
doi = {10.1038/s41398-022-02066-x},
issn = {2158-3188},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-13},
journal = {Transl Psychiatry},
volume = {12},
number = {1},
pages = {299},
abstract = {Posttraumatic stress disorder (PTSD) is a significant public health issue. Yet, there are limited treatment options and no data to suggest which treatment will work for whom. We tested the efficacy of virtual reality exposure (VRE) or prolonged imaginal exposure (PE), augmented with D-cycloserine (DCS) for combat-related PTSD. As an exploratory aim, we examined whether brain-derived neurotrophic factor (BDNF) and fatty acid amide hydrolase (FAAH) moderated treatment response. Military personnel with PTSD (n = 192) were recruited into a multisite double-blind randomized controlled trial to receive nine weeks of VRE or PE, with DCS or placebo. Primary outcome was the improvement in symptom severity. Randomization was stratified by comorbid depression (MDD) and site. Participants in both VRE and PE showed similar meaningful clinical improvement with no difference between the treatment groups. A significant interaction (p = 0.45) suggested VRE was more effective for depressed participants (CAPS difference M = 3.51 [95\% CI 1.17–5.86]},
keywords = {DTIC, MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato Mexico, 2022, ISBN: 978-1-4503-9888-6.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{feng_tool_2022,
title = {A Tool for Extracting {3D} Avatar-Ready Gesture Animations from Monocular Videos},
author = {Andrew Feng and Samuel Shin and Youngwoo Yoon},
url = {https://dl.acm.org/doi/10.1145/3561975.3562953},
doi = {10.1145/3561975.3562953},
isbn = {978-1-4503-9888-6},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-04},
booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
pages = {1--7},
publisher = {ACM},
address = {Guanajuato, Mexico},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 790–796, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lu_deepmotion_2022,
title = {The {DeepMotion} entry to the {GENEA} Challenge 2022},
author = {Shuhong Lu and Andrew Feng},
url = {https://dl.acm.org/doi/10.1145/3536221.3558059},
doi = {10.1145/3536221.3558059},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {International Conference on Multimodal Interaction},
pages = {790--796},
publisher = {ACM},
address = {Bengaluru, India},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Xu, Jiashu; Zu, Tianxin; Soleymani, Mohammad
X-Norm: Exchanging Normalization Parameters for Bimodal Fusion Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 605–614, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: Emotions, Virtual Humans
@inproceedings{yin_x-norm_2022,
title = {{X-Norm}: Exchanging Normalization Parameters for Bimodal Fusion},
author = {Yufeng Yin and Jiashu Xu and Tianxin Zu and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3536221.3556581},
doi = {10.1145/3536221.3556581},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {International Conference on Multimodal Interaction},
pages = {605--614},
publisher = {ACM},
address = {Bengaluru, India},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Platforms and Tools for SIA Research and Development Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 261–304, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@incollection{hartholt_platforms_2022,
title = {Platforms and Tools for {SIA} Research and Development},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1145/3563659.3563668},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {261--304},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Traum, David
Socially Interactive Agent Dialogue Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 45–76, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
Links | BibTeX | Tags: Natural Language, UARC
@incollection{traum_socially_2022,
title = {Socially Interactive Agent Dialogue},
author = {David Traum},
url = {https://doi.org/10.1145/3563659.3563663},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {45--76},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Lugrin, Birgit; Pelachaud, Catherine; André, Elisabeth; Aylett, Ruth; Bickmore, Timothy; Breazeal, Cynthia; Broekens, Joost; Dautenhahn, Kerstin; Gratch, Jonathan; Kopp, Stefan; Nadel, Jacqueline; Paiva, Ana; Wykowska, Agnieszka
Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 561–626, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{lugrin_challenge_2022,
title = {Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics},
author = {Birgit Lugrin and Catherine Pelachaud and Elisabeth André and Ruth Aylett and Timothy Bickmore and Cynthia Breazeal and Joost Broekens and Kerstin Dautenhahn and Jonathan Gratch and Stefan Kopp and Jacqueline Nadel and Ana Paiva and Agnieszka Wykowska},
url = {https://doi.org/10.1145/3563659.3563677},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {561--626},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Liu, Shichen; Cai, Yunxuan; Chen, Haiwei; Zhou, Yichao; Zhao, Yajie
Rapid Face Asset Acquisition with Recurrent Feature Alignment Journal Article
In: ACM Trans. Graph., vol. 41, no. 6, pp. 214:1–214:17, 2022, ISSN: 0730-0301.
Abstract | Links | BibTeX | Tags: VGL
@article{liu_rapid_2022,
title = {Rapid Face Asset Acquisition with Recurrent Feature Alignment},
author = {Shichen Liu and Yunxuan Cai and Haiwei Chen and Yichao Zhou and Yajie Zhao},
url = {https://dl.acm.org/doi/10.1145/3550454.3555509},
doi = {10.1145/3550454.3555509},
issn = {0730-0301},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
journal = {ACM Trans. Graph.},
volume = {41},
number = {6},
pages = {214:1--214:17},
abstract = {We present Recurrent Feature Alignment (ReFA), an end-to-end neural network for the very rapid creation of production-grade face assets from multi-view images. ReFA is on par with the industrial pipelines in quality for producing accurate, complete, registered, and textured assets directly applicable to physically-based rendering, but produces the asset end-to-end, fully automatically at a significantly faster speed at 4.5 FPS, which is unprecedented among neural-based techniques. Our method represents face geometry as a position map in the UV space. The network first extracts per-pixel features in both the multi-view image space and the UV space. A recurrent module then iteratively optimizes the geometry by projecting the image-space features to the UV space and comparing them with a reference UV-space feature. The optimized geometry then provides pixel-aligned signals for the inference of high-resolution textures. Experiments have validated that ReFA achieves a median error of 0.603mm in geometry reconstruction, is robust to extreme pose and expression, and excels in sparse-view settings. We believe that the progress achieved by our network enables lightweight, fast face assets acquisition that significantly boosts the downstream applications, such as avatar creation and facial performance capture. It will also enable massive database capturing for deep learning purposes.},
keywords = {VGL},
pubstate = {published},
tppubtype = {article}
}
Pauw, Lisanne S.; Sauter, Disa A.; Kleef, Gerben A.; Lucas, Gale M.; Gratch, Jonathan; Fischer, Agneta H.
The avatar will see you now: Support from a virtual human provides socio-emotional benefits Journal Article
In: Computers in Human Behavior, vol. 136, pp. 107368, 2022, ISSN: 07475632.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{pauw_avatar_2022,
title = {The avatar will see you now: Support from a virtual human provides socio-emotional benefits},
author = {Lisanne S. Pauw and Disa A. Sauter and Gerben A. Kleef and Gale M. Lucas and Jonathan Gratch and Agneta H. Fischer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S074756322200190X},
doi = {10.1016/j.chb.2022.107368},
issn = {0747-5632},
year = {2022},
date = {2022-11-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {136},
pages = {107368},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}