Publications
Search
Core, Mark G.; Nye, Benjamin D.; Fegley, Brent D.
Trend-Aware Scenario Authoring: Adapting Training Toward Patterns from Real Operations Book Section
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, vol. 14727, pp. 15–24, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-60608-3 978-3-031-60609-0, (Series Title: Lecture Notes in Computer Science).
@incollection{sottilare_trend-aware_2024,
title = {Trend-Aware Scenario Authoring: Adapting Training Toward Patterns from Real Operations},
author = {Mark G. Core and Benjamin D. Nye and Brent D. Fegley},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/10.1007/978-3-031-60609-0_2},
doi = {10.1007/978-3-031-60609-0_2},
isbn = {978-3-031-60608-3 978-3-031-60609-0},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-18},
booktitle = {Adaptive Instructional Systems},
series = {Lecture Notes in Computer Science},
volume = {14727},
pages = {15--24},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Core, Mark G.; Chereddy, Sai V. R.; Young, Vivian; Auerbach, Daniel
Bootstrapping Assessments for Team Simulations: Transfer Learning Between First-Person-Shooter Game Maps Book Section
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, vol. 14727, pp. 261–271, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-60608-3 978-3-031-60609-0, (Series Title: Lecture Notes in Computer Science).
@incollection{sottilare_bootstrapping_2024,
title = {Bootstrapping Assessments for Team Simulations: Transfer Learning Between First-Person-Shooter Game Maps},
author = {Benjamin D. Nye and Mark G. Core and Sai V. R. Chereddy and Vivian Young and Daniel Auerbach},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/10.1007/978-3-031-60609-0_19},
doi = {10.1007/978-3-031-60609-0_19},
isbn = {978-3-031-60608-3 978-3-031-60609-0},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-18},
booktitle = {Adaptive Instructional Systems},
series = {Lecture Notes in Computer Science},
volume = {14727},
pages = {261--271},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu HI USA, 2024, ISBN: 9798400703317.
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1--8},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Loewenstein, George; Chater, Nick
Conversational technology and reactions to withheld information Journal Article
In: PLoS ONE, vol. 19, no. 4, pp. e0301382, 2024, ISSN: 1932-6203.
@article{gurney_conversational_2024,
  title     = {Conversational technology and reactions to withheld information},
  author    = {Nikolos Gurney and George Loewenstein and Nick Chater},
  editor    = {Petre Caraiani},
  url       = {https://dx.plos.org/10.1371/journal.pone.0301382},
  doi       = {10.1371/journal.pone.0301382},
  issn      = {1932-6203},
  year      = {2024},
  date      = {2024-04-01},
  urldate   = {2024-04-16},
  journal   = {PLoS ONE},
  volume    = {19},
  number    = {4},
  pages     = {e0301382},
  abstract  = {People frequently face decisions that require making inferences about withheld information. The advent of large language models coupled with conversational technology, e.g., Alexa, Siri, Cortana, and the Google Assistant, is changing the mode in which people make these inferences. We demonstrate that conversational modes of information provision, relative to traditional digital media, result in more critical responses to withheld information, including: (1) a reduction in evaluations of a product or service for which information is withheld and (2) an increased likelihood of recalling that information was withheld. These effects are robust across multiple conversational modes: a recorded phone conversation, an unfolding chat conversation, and a conversation script. We provide further evidence that these effects hold for conversations with the Google Assistant, a prominent conversational technology. The experimental results point to participants’ intuitions about why the information was withheld as the driver of the effect.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
@misc{tran_dyadic_2024,
title = {Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.09069},
eprint = {2403.09069},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-19},
publisher = {arXiv},
abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work consider a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations, through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
note = {arXiv:2403.09069 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Lu, Liupei; Yin, Yufeng; Gu, Yuming; Wu, Yizhen; Prasad, Pratusha; Zhao, Yajie; Soleymani, Mohammad
Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection Miscellaneous
2024, (arXiv:2403.10737 [cs]).
@misc{lu_leveraging_2024,
title = {Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection},
author = {Liupei Lu and Yufeng Yin and Yuming Gu and Yizhen Wu and Pratusha Prasad and Yajie Zhao and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.10737},
eprint = {2403.10737},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
publisher = {arXiv},
abstract = {Facial action unit (AU) detection is a fundamental block for objective facial expression analysis. Supervised learning approaches require a large amount of manual labeling which is costly. The limited labeled data are also not diverse in terms of gender which can affect model fairness. In this paper, we propose to use synthetically generated data and multi-source domain adaptation (MSDA) to address the problems of the scarcity of labeled data and the diversity of subjects. Specifically, we propose to generate a diverse dataset through synthetic facial expression re-targeting by transferring the expressions from real faces to synthetic avatars. Then, we use MSDA to transfer the AU detection knowledge from a real dataset and the synthetic dataset to a target dataset. Instead of aligning the overall distributions of different domains, we propose Paired Moment Matching (PM2) to align the features of the paired real and synthetic data with the same facial expression. To further improve gender fairness, PM2 matches the features of the real data with a female and a male synthetic image. Our results indicate that synthetic data and the proposed model improve both AU detection performance and fairness across genders, demonstrating its potential to solve AU detection in-the-wild.},
note = {arXiv:2403.10737 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Frummet, Alexander; Speggiorin, Alessandro; Elsweiler, David; Leuski, Anton; Dalton, Jeff
Cooking with Conversation: Enhancing User Engagement and Learning with a Knowledge-Enhancing Assistant Journal Article
In: ACM Trans. Inf. Syst., pp. 3649500, 2024, ISSN: 1046-8188, 1558-2868.
@article{frummet_cooking_2024,
title = {Cooking with Conversation: Enhancing User Engagement and Learning with a Knowledge-Enhancing Assistant},
author = {Alexander Frummet and Alessandro Speggiorin and David Elsweiler and Anton Leuski and Jeff Dalton},
url = {https://dl.acm.org/doi/10.1145/3649500},
doi = {10.1145/3649500},
issn = {1046-8188, 1558-2868},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
journal = {ACM Trans. Inf. Syst.},
pages = {3649500},
abstract = {We present two empirical studies to investigate users’ expectations and behaviours when using digital assistants, such as Alexa and Google Home, in a kitchen context: First, a survey (N=200) queries participants on their expectations for the kinds of information that such systems should be able to provide. While consensus exists on expecting information about cooking steps and processes, younger participants who enjoy cooking express a higher likelihood of expecting details on food history or the science of cooking. In a follow-up Wizard-of-Oz study (N = 48), users were guided through the steps of a recipe either by an \emph{active} wizard that alerted participants to information it could provide or a \emph{passive} wizard who only answered questions that were provided by the user. The \emph{active} policy led to almost double the number of conversational utterances and 1.5 times more knowledge-related user questions compared to the \emph{passive} policy. Also, it resulted in 1.7 times more knowledge communicated than the \emph{passive} policy. We discuss the findings in the context of related work and reveal implications for the design and use of such assistants for cooking and other purposes such as DIY and craft tasks, as well as the lessons we learned for evaluating such systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
“active” wizard that alerted participants to information it could provide or a “passive” wizard who only answered questions that were provided by the user. The “active” policy led to almost double the number of conversational utterances and 1.5 times more knowledge-related user questions compared to the “passive” policy. Also, it resulted in 1.7 times more knowledge communicated than the “passive” policy. We discuss the findings in the context of related work and reveal implications for the design and use of such assistants for cooking and other purposes such as DIY and craft tasks, as well as the lessons we learned for evaluating such systems.
Gordon, Andrew S.; Feng, Andrew
Combining the Predictions of Out-of-Domain Classifiers Using Etcetera Abduction Proceedings Article
In: 2024 58th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Princeton, NJ, USA, 2024, ISBN: 9798350369298.
@inproceedings{gordon_combining_2024,
title = {Combining the Predictions of Out-of-Domain Classifiers Using Etcetera Abduction},
author = {Andrew S. Gordon and Andrew Feng},
url = {https://ieeexplore.ieee.org/document/10480194/},
doi = {10.1109/CISS59072.2024.10480194},
isbn = {9798350369298},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
booktitle = {2024 58th Annual Conference on Information Sciences and Systems (CISS)},
pages = {1--6},
publisher = {IEEE},
address = {Princeton, NJ, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Ustun, Volkan
Spontaneous Theory of Mind for Artificial Intelligence Journal Article
In: 2024, (Publisher: [object Object] Version Number: 1).
@article{gurney_spontaneous_2024,
title = {Spontaneous Theory of Mind for Artificial Intelligence},
author = {Nikolos Gurney and David V. Pynadath and Volkan Ustun},
url = {https://arxiv.org/abs/2402.13272},
doi = {10.48550/ARXIV.2402.13272},
eprint = {2402.13272},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {Existing approaches to Theory of Mind (ToM) in Artificial Intelligence (AI) overemphasize prompted, or cue-based, ToM, which may limit our collective ability to develop Artificial Social Intelligence (ASI). Drawing from research in computer science, cognitive science, and related disciplines, we contrast prompted ToM with what we call spontaneous ToM – reasoning about others' mental states that is grounded in unintentional, possibly uncontrollable cognitive functions. We argue for a principled approach to studying and developing AI ToM and suggest that a robust, or general, ASI will respond to prompts \emph{and} spontaneously engage in social reasoning.},
note = {arXiv:2402.13272 [cs], Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Morstatter, Fred; Pynadath, David V.; Russell, Adam; Satyukov, Gleb
Operational Collective Intelligence of Humans and Machines Journal Article
In: 2024, (Publisher: [object Object] Version Number: 1).
@article{gurney_operational_2024,
title = {Operational Collective Intelligence of Humans and Machines},
author = {Nikolos Gurney and Fred Morstatter and David V. Pynadath and Adam Russell and Gleb Satyukov},
url = {https://arxiv.org/abs/2402.13273},
doi = {10.48550/ARXIV.2402.13273},
eprint = {2402.13273},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {We explore the use of aggregative crowdsourced forecasting (ACF) as a mechanism to help operationalize ``collective intelligence'' of human-machine teams for coordinated actions. We adopt the definition for Collective Intelligence as: ``A property of groups that emerges from synergies among data-information-knowledge, software-hardware, and individuals (those with new insights as well as recognized authorities) that enables just-in-time knowledge for better decisions than these three elements acting alone.'' Collective Intelligence emerges from new ways of connecting humans and AI to enable decision-advantage, in part by creating and leveraging additional sources of information that might otherwise not be included. Aggregative crowdsourced forecasting (ACF) is a recent key advancement towards Collective Intelligence wherein predictions (X\% probability that Y will happen) and rationales (why I believe it is this probability that X will happen) are elicited independently from a diverse crowd, aggregated, and then used to inform higher-level decision-making. This research asks whether ACF, as a key way to enable Operational Collective Intelligence, could be brought to bear on operational scenarios (i.e., sequences of events with defined agents, components, and interactions) and decision-making, and considers whether such a capability could provide novel operational capabilities to enable new forms of decision-advantage.},
note = {arXiv:2402.13273 [cs], Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis Journal Article
In: PLoS ONE, vol. 19, no. 1, pp. e0296468, 2024, ISSN: 1932-6203.
@article{awada_stress_2024,
  title     = {Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis},
  author    = {Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
  editor    = {Iftikhar Ahmed Khan},
  url       = {https://dx.plos.org/10.1371/journal.pone.0296468},
  doi       = {10.1371/journal.pone.0296468},
  issn      = {1932-6203},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-02-21},
  journal   = {PLoS ONE},
  volume    = {19},
  number    = {1},
  pages     = {e0296468},
  abstract  = {Previous studies have primarily focused on predicting stress arousal, encompassing physiological, behavioral, and psychological responses to stressors, while neglecting the examination of stress appraisal. Stress appraisal involves the cognitive evaluation of a situation as stressful or non-stressful, and as a threat/pressure or a challenge/opportunity. In this study, we investigated several research questions related to the association between states of stress appraisal (i.e., boredom, eustress, coexisting eustress-distress, distress) and various factors such as stress levels, mood, productivity, physiological and behavioral responses, as well as the most effective ML algorithms and data signals for predicting stress appraisal. The results support the Yerkes-Dodson law, showing that a moderate stress level is associated with increased productivity and positive mood, while low and high levels of stress are related to decreased productivity and negative mood, with distress overpowering eustress when they coexist. Changes in stress appraisal relative to physiological and behavioral features were examined through the lenses of stress arousal, activity engagement, and performance. An XGBOOST model achieved the best prediction accuracies of stress appraisal, reaching 82.78% when combining physiological and behavioral features and 79.55% using only the physiological dataset. The small accuracy difference of 3% indicates that physiological data alone may be adequate to accurately predict stress appraisal, and the feature importance results identified electrodermal activity, skin temperature, and blood volume pulse as the most useful physiologic features. Implementing these models within work environments can serve as a foundation for designing workplace policies, practices, and stress management strategies that prioritize the promotion of eustress while reducing distress and boredom. Such efforts can foster a supportive work environment to enhance employee well-being and productivity.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Spiegel, Brennan M. R.; Rizzo, Albert; Persky, Susan; Liran, Omer; Wiederhold, Brenda; Woods, Susan; Donovan, Kate; Sarkar, Korak; Xiang, Henry; Joo, Sun; Jotwani, Rohan; Lang, Min; Paul, Margot; Senter-Zapata, Mike; Widmeier, Keith; Zhang, Haipeng
What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field Journal Article
In: Journal of Medical Extended Reality, vol. 1, no. 1, pp. 4–12, 2024, ISSN: 2994-1520.
@article{spiegel_what_2024,
title = {What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field},
author = {Brennan M. R. Spiegel and Albert Rizzo and Susan Persky and Omer Liran and Brenda Wiederhold and Susan Woods and Kate Donovan and Korak Sarkar and Henry Xiang and Sun Joo and Rohan Jotwani and Min Lang and Margot Paul and Mike Senter-Zapata and Keith Widmeier and Haipeng Zhang},
url = {https://www.liebertpub.com/doi/10.1089/jmxr.2023.0012},
doi = {10.1089/jmxr.2023.0012},
issn = {2994-1520},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-20},
journal = {Journal of Medical Extended Reality},
volume = {1},
number = {1},
pages = {4--12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Barrett, Trevor; Faulk, Robert; Sergeant, Army Master; Boberg, Jill; Bartels, Matthew; Colonel, Marine Lieutenant; Saxon, Leslie A.
Force plate assessments in reconnaissance marine training company Journal Article
In: BMC Sports Sci Med Rehabil, vol. 16, no. 1, pp. 16, 2024, ISSN: 2052-1847.
@article{barrett_force_2024,
title = {Force plate assessments in reconnaissance marine training company},
author = {Trevor Barrett and Robert Faulk and Army Master Sergeant and Jill Boberg and Matthew Bartels and Marine Lieutenant Colonel and Leslie A. Saxon},
url = {https://bmcsportsscimedrehabil.biomedcentral.com/articles/10.1186/s13102-023-00796-z},
doi = {10.1186/s13102-023-00796-z},
issn = {2052-1847},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-22},
journal = {BMC Sports Sci Med Rehabil},
volume = {16},
number = {1},
pages = {16},
abstract = {The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.},
internal-note = {Author list contains military ranks ("Army Master Sergeant", "Marine Lieutenant Colonel") parsed as personal names — likely a scrape artifact; verify against the published article},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research \& Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling \& simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration \& Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
internal-note = {Missing required booktitle for @inproceedings (AHFE 2024 proceedings per DOI prefix 10.54941) — confirm and add},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {0360-1323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
eprint = {2311.10781},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to asses models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
eprint = {2311.03551},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
@misc{chang_magicdance_2023,
title = {MagicDance: Realistic Human Dance Video Generation with Motions \& Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
eprint = {2311.12052},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Timothy S.; Gordon, Andrew S.
Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5 Book Section
In: Holloway-Attaway, Lissa; Murray, John T. (Ed.): Interactive Storytelling, vol. 14384, pp. 297–305, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-47657-0 978-3-031-47658-7, (Series Title: Lecture Notes in Computer Science).
@incollection{holloway-attaway_playing_2023,
title = {Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5},
author = {Timothy S. Wang and Andrew S. Gordon},
editor = {Lissa Holloway-Attaway and John T. Murray},
url = {https://link.springer.com/10.1007/978-3-031-47658-7_28},
doi = {10.1007/978-3-031-47658-7_28},
isbn = {978-3-031-47657-0, 978-3-031-47658-7},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Interactive Storytelling},
volume = {14384},
pages = {297--305},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Filter
2024
Core, Mark G.; Nye, Benjamin D.; Fegley, Brent D.
Trend-Aware Scenario Authoring: Adapting Training Toward Patterns from Real Operations Book Section
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, vol. 14727, pp. 15–24, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-60608-3 978-3-031-60609-0, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC
@incollection{sottilare_trend-aware_2024,
title = {Trend-Aware Scenario Authoring: Adapting Training Toward Patterns from Real Operations},
author = {Mark G. Core and Benjamin D. Nye and Brent D. Fegley},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/10.1007/978-3-031-60609-0_2},
doi = {10.1007/978-3-031-60609-0_2},
isbn = {978-3-031-60608-3, 978-3-031-60609-0},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-18},
booktitle = {Adaptive Instructional Systems},
volume = {14727},
pages = {15--24},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Core, Mark G.; Chereddy, Sai V. R.; Young, Vivian; Auerbach, Daniel
Bootstrapping Assessments for Team Simulations: Transfer Learning Between First-Person-Shooter Game Maps Book Section
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, vol. 14727, pp. 261–271, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-60608-3 978-3-031-60609-0, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Learning Sciences, Machine Learning, UARC
@incollection{sottilare_bootstrapping_2024,
title = {Bootstrapping Assessments for Team Simulations: Transfer Learning Between First-Person-Shooter Game Maps},
author = {Benjamin D. Nye and Mark G. Core and Sai V. R. Chereddy and Vivian Young and Daniel Auerbach},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/10.1007/978-3-031-60609-0_19},
doi = {10.1007/978-3-031-60609-0_19},
isbn = {978-3-031-60608-3, 978-3-031-60609-0},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-18},
booktitle = {Adaptive Instructional Systems},
volume = {14727},
pages = {261--271},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, Learning Sciences, Machine Learning, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu HI USA, 2024, ISBN: 9798400703317.
Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1--8},
publisher = {ACM},
address = {Honolulu, HI, USA},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Loewenstein, George; Chater, Nick
Conversational technology and reactions to withheld information Journal Article
In: PLoS ONE, vol. 19, no. 4, pp. e0301382, 2024, ISSN: 1932-6203.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@article{gurney_conversational_2024,
  title     = {Conversational technology and reactions to withheld information},
  author    = {Nikolos Gurney and George Loewenstein and Nick Chater},
  editor    = {Petre Caraiani},
  url       = {https://dx.plos.org/10.1371/journal.pone.0301382},
  doi       = {10.1371/journal.pone.0301382},
  issn      = {1932-6203},
  year      = {2024},
  date      = {2024-04-01},
  urldate   = {2024-04-16},
  journal   = {PLoS ONE},
  volume    = {19},
  number    = {4},
  pages     = {e0301382},
  abstract  = {People frequently face decisions that require making inferences about withheld information. The advent of large language models coupled with conversational technology, e.g., Alexa, Siri, Cortana, and the Google Assistant, is changing the mode in which people make these inferences. We demonstrate that conversational modes of information provision, relative to traditional digital media, result in more critical responses to withheld information, including: (1) a reduction in evaluations of a product or service for which information is withheld and (2) an increased likelihood of recalling that information was withheld. These effects are robust across multiple conversational modes: a recorded phone conversation, an unfolding chat conversation, and a conversation script. We provide further evidence that these effects hold for conversations with the Google Assistant, a prominent conversational technology. The experimental results point to participants’ intuitions about why the information was withheld as the driver of the effect.},
  keywords  = {DTIC, Social Simulation, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
Dyadic Interaction Modeling for Social Behavior Generation Miscellaneous
2024, (arXiv:2403.09069 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{tran_dyadic_2024,
title = {Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.09069},
eprint = {2403.09069},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-19},
publisher = {arXiv},
abstract = {Human-human communication is like a delicate dance where listeners and speakers concurrently interact to maintain conversational dynamics. Hence, an effective model for generating listener nonverbal behaviors requires understanding the dyadic context and interaction. In this paper, we present an effective framework for creating 3D facial motions in dyadic interactions. Existing work consider a listener as a reactive agent with reflexive behaviors to the speaker's voice and facial motions. The heart of our framework is Dyadic Interaction Modeling (DIM), a pre-training approach that jointly models speakers' and listeners' motions through masking and contrastive learning to learn representations that capture the dyadic context. To enable the generation of non-deterministic behaviors, we encode both listener and speaker motions into discrete latent representations, through VQ-VAE. The pre-trained model is further fine-tuned for motion generation. Extensive experiments demonstrate the superiority of our framework in generating listener motions, establishing a new state-of-the-art according to the quantitative measures capturing the diversity and realism of generated motions. Qualitative results demonstrate the superior capabilities of the proposed approach in generating diverse and realistic expressions, eye blinks and head gestures.},
note = {arXiv:2403.09069 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Lu, Liupei; Yin, Yufeng; Gu, Yuming; Wu, Yizhen; Prasad, Pratusha; Zhao, Yajie; Soleymani, Mohammad
Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection Miscellaneous
2024, (arXiv:2403.10737 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{lu_leveraging_2024,
title = {Leveraging Synthetic Data for Generalizable and Fair Facial Action Unit Detection},
author = {Liupei Lu and Yufeng Yin and Yuming Gu and Yizhen Wu and Pratusha Prasad and Yajie Zhao and Mohammad Soleymani},
url = {http://arxiv.org/abs/2403.10737},
eprint = {2403.10737},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
publisher = {arXiv},
abstract = {Facial action unit (AU) detection is a fundamental block for objective facial expression analysis. Supervised learning approaches require a large amount of manual labeling which is costly. The limited labeled data are also not diverse in terms of gender which can affect model fairness. In this paper, we propose to use synthetically generated data and multi-source domain adaptation (MSDA) to address the problems of the scarcity of labeled data and the diversity of subjects. Specifically, we propose to generate a diverse dataset through synthetic facial expression re-targeting by transferring the expressions from real faces to synthetic avatars. Then, we use MSDA to transfer the AU detection knowledge from a real dataset and the synthetic dataset to a target dataset. Instead of aligning the overall distributions of different domains, we propose Paired Moment Matching (PM2) to align the features of the paired real and synthetic data with the same facial expression. To further improve gender fairness, PM2 matches the features of the real data with a female and a male synthetic image. Our results indicate that synthetic data and the proposed model improve both AU detection performance and fairness across genders, demonstrating its potential to solve AU detection in-the-wild.},
note = {arXiv:2403.10737 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Frummet, Alexander; Speggiorin, Alessandro; Elsweiler, David; Leuski, Anton; Dalton, Jeff
Cooking with Conversation: Enhancing User Engagement and Learning with a Knowledge-Enhancing Assistant Journal Article
In: ACM Trans. Inf. Syst., pp. 3649500, 2024, ISSN: 1046-8188, 1558-2868.
Abstract | Links | BibTeX | Tags: DTIC, Natural Language, UARC
@article{frummet_cooking_2024,
title = {Cooking with Conversation: Enhancing User Engagement and Learning with a Knowledge-Enhancing Assistant},
author = {Alexander Frummet and Alessandro Speggiorin and David Elsweiler and Anton Leuski and Jeff Dalton},
url = {https://dl.acm.org/doi/10.1145/3649500},
doi = {10.1145/3649500},
issn = {1046-8188, 1558-2868},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
journal = {ACM Trans. Inf. Syst.},
pages = {3649500},
abstract = {We present two empirical studies to investigate users’ expectations and behaviours when using digital assistants, such as Alexa and Google Home, in a kitchen context: First, a survey (N=200) queries participants on their expectations for the kinds of information that such systems should be able to provide. While consensus exists on expecting information about cooking steps and processes, younger participants who enjoy cooking express a higher likelihood of expecting details on food history or the science of cooking. In a follow-up Wizard-of-Oz study (N = 48), users were guided through the steps of a recipe either by an \textit{active} wizard that alerted participants to information it could provide or a \textit{passive} wizard who only answered questions that were provided by the user. The \textit{active} policy led to almost double the number of conversational utterances and 1.5 times more knowledge-related user questions compared to the \textit{passive} policy. Also, it resulted in 1.7 times more knowledge communicated than the \textit{passive} policy. We discuss the findings in the context of related work and reveal implications for the design and use of such assistants for cooking and other purposes such as DIY and craft tasks, as well as the lessons we learned for evaluating such systems.},
keywords = {DTIC, Natural Language, UARC},
pubstate = {published},
tppubtype = {article}
}
active wizard that alerted participants to information it could provide or a passive wizard who only answered questions that were provided by the user. The active policy led to almost double the number of conversational utterances and 1.5 times more knowledge-related user questions compared to the passive policy. Also, it resulted in 1.7 times more knowledge communicated than the passive policy. We discuss the findings in the context of related work and reveal implications for the design and use of such assistants for cooking and other purposes such as DIY and craft tasks, as well as the lessons we learned for evaluating such systems.
Gordon, Andrew S.; Feng, Andrew
Combining the Predictions of Out-of-Domain Classifiers Using Etcetera Abduction Proceedings Article
In: 2024 58th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Princeton, NJ, USA, 2024, ISBN: 9798350369298.
Links | BibTeX | Tags: DTIC, Narrative, The Narrative Group, UARC
@inproceedings{gordon_combining_2024,
title = {Combining the Predictions of Out-of-Domain Classifiers Using Etcetera Abduction},
author = {Andrew S. Gordon and Andrew Feng},
url = {https://ieeexplore.ieee.org/document/10480194/},
doi = {10.1109/CISS59072.2024.10480194},
isbn = {9798350369298},
year = {2024},
date = {2024-03-01},
urldate = {2024-04-16},
booktitle = {2024 58th Annual Conference on Information Sciences and Systems (CISS)},
pages = {1--6},
publisher = {IEEE},
address = {Princeton, NJ, USA},
keywords = {DTIC, Narrative, The Narrative Group, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Ustun, Volkan
Spontaneous Theory of Mind for Artificial Intelligence Journal Article
In: 2024, (arXiv:2402.13272 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, Social Simulation, UARC
@article{gurney_spontaneous_2024,
title = {Spontaneous Theory of Mind for Artificial Intelligence},
author = {Nikolos Gurney and David V. Pynadath and Volkan Ustun},
url = {https://arxiv.org/abs/2402.13272},
doi = {10.48550/ARXIV.2402.13272},
eprint = {2402.13272},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {Existing approaches to Theory of Mind (ToM) in Artificial Intelligence (AI) overemphasize prompted, or cue-based, ToM, which may limit our collective ability to develop Artificial Social Intelligence (ASI). Drawing from research in computer science, cognitive science, and related disciplines, we contrast prompted ToM with what we call spontaneous ToM – reasoning about others' mental states that is grounded in unintentional, possibly uncontrollable cognitive functions. We argue for a principled approach to studying and developing AI ToM and suggest that a robust, or general, ASI will respond to prompts \textit{and} spontaneously engage in social reasoning.},
note = {arXiv:2402.13272 [cs]},
keywords = {AI, DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Morstatter, Fred; Pynadath, David V.; Russell, Adam; Satyukov, Gleb
Operational Collective Intelligence of Humans and Machines Journal Article
In: 2024, (arXiv:2402.13273 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@article{gurney_operational_2024,
title = {Operational Collective Intelligence of Humans and Machines},
author = {Nikolos Gurney and Fred Morstatter and David V. Pynadath and Adam Russell and Gleb Satyukov},
url = {https://arxiv.org/abs/2402.13273},
doi = {10.48550/ARXIV.2402.13273},
eprint = {2402.13273},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {We explore the use of aggregative crowdsourced forecasting (ACF) as a mechanism to help operationalize ``collective intelligence'' of human-machine teams for coordinated actions. We adopt the definition for Collective Intelligence as: ``A property of groups that emerges from synergies among data-information-knowledge, software-hardware, and individuals (those with new insights as well as recognized authorities) that enables just-in-time knowledge for better decisions than these three elements acting alone.'' Collective Intelligence emerges from new ways of connecting humans and AI to enable decision-advantage, in part by creating and leveraging additional sources of information that might otherwise not be included. Aggregative crowdsourced forecasting (ACF) is a recent key advancement towards Collective Intelligence wherein predictions (X\% probability that Y will happen) and rationales (why I believe it is this probability that X will happen) are elicited independently from a diverse crowd, aggregated, and then used to inform higher-level decision-making. This research asks whether ACF, as a key way to enable Operational Collective Intelligence, could be brought to bear on operational scenarios (i.e., sequences of events with defined agents, components, and interactions) and decision-making, and considers whether such a capability could provide novel operational capabilities to enable new forms of decision-advantage.},
note = {arXiv:2402.13273 [cs]},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis Journal Article
In: PLoS ONE, vol. 19, no. 1, pp. e0296468, 2024, ISSN: 1932-6203.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC
@article{awada_stress_2024,
title = {Stress appraisal in the workplace and its associations with productivity and mood: Insights from a multimodal machine learning analysis},
author = {Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
editor = {Iftikhar Ahmed Khan},
url = {https://dx.plos.org/10.1371/journal.pone.0296468},
doi = {10.1371/journal.pone.0296468},
issn = {1932-6203},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-21},
journal = {PLoS ONE},
volume = {19},
number = {1},
pages = {e0296468},
abstract = {Previous studies have primarily focused on predicting stress arousal, encompassing physiological, behavioral, and psychological responses to stressors, while neglecting the examination of stress appraisal. Stress appraisal involves the cognitive evaluation of a situation as stressful or non-stressful, and as a threat/pressure or a challenge/opportunity. In this study, we investigated several research questions related to the association between states of stress appraisal (i.e., boredom, eustress, coexisting eustress-distress, distress) and various factors such as stress levels, mood, productivity, physiological and behavioral responses, as well as the most effective ML algorithms and data signals for predicting stress appraisal. The results support the Yerkes-Dodson law, showing that a moderate stress level is associated with increased productivity and positive mood, while low and high levels of stress are related to decreased productivity and negative mood, with distress overpowering eustress when they coexist. Changes in stress appraisal relative to physiological and behavioral features were examined through the lenses of stress arousal, activity engagement, and performance. An XGBOOST model achieved the best prediction accuracies of stress appraisal, reaching 82.78% when combining physiological and behavioral features and 79.55% using only the physiological dataset. The small accuracy difference of 3% indicates that physiological data alone may be adequate to accurately predict stress appraisal, and the feature importance results identified electrodermal activity, skin temperature, and blood volume pulse as the most useful physiologic features. Implementing these models within work environments can serve as a foundation for designing workplace policies, practices, and stress management strategies that prioritize the promotion of eustress while reducing distress and boredom. Such efforts can foster a supportive work environment to enhance employee well-being and productivity.},
keywords = {DTIC, Machine Learning, UARC},
pubstate = {published},
tppubtype = {article}
}
Spiegel, Brennan M. R.; Rizzo, Albert; Persky, Susan; Liran, Omer; Wiederhold, Brenda; Woods, Susan; Donovan, Kate; Sarkar, Korak; Xiang, Henry; Joo, Sun; Jotwani, Rohan; Lang, Min; Paul, Margot; Senter-Zapata, Mike; Widmeier, Keith; Zhang, Haipeng
What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field Journal Article
In: Journal of Medical Extended Reality, vol. 1, no. 1, pp. 4–12, 2024, ISSN: 2994-1520.
Links | BibTeX | Tags: DTIC, MedVR, UARC
@article{spiegel_what_2024,
title = {What Is Medical Extended Reality? A Taxonomy Defining the Current Breadth and Depth of an Evolving Field},
author = {Brennan M. R. Spiegel and Albert Rizzo and Susan Persky and Omer Liran and Brenda Wiederhold and Susan Woods and Kate Donovan and Korak Sarkar and Henry Xiang and Sun Joo and Rohan Jotwani and Min Lang and Margot Paul and Mike Senter-Zapata and Keith Widmeier and Haipeng Zhang},
url = {https://www.liebertpub.com/doi/10.1089/jmxr.2023.0012},
doi = {10.1089/jmxr.2023.0012},
issn = {2994-1520},
year = {2024},
date = {2024-01-01},
urldate = {2024-02-20},
journal = {Journal of Medical Extended Reality},
volume = {1},
number = {1},
pages = {4--12},
keywords = {DTIC, MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Barrett, Trevor; Faulk, Robert; Sergeant, Army Master; Boberg, Jill; Bartels, Matthew; Colonel, Marine Lieutenant; Saxon, Leslie A.
Force plate assessments in reconnaissance marine training company Journal Article
In: BMC Sports Sci Med Rehabil, vol. 16, no. 1, pp. 16, 2024, ISSN: 2052-1847.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, UARC
@article{barrett_force_2024,
title = {Force plate assessments in reconnaissance marine training company},
author = {Trevor Barrett and Robert Faulk and Army Master Sergeant and Jill Boberg and Matthew Bartels and Marine Lieutenant Colonel and Leslie A. Saxon},
internal-note = {NOTE(review): "Army Master Sergeant" and "Marine Lieutenant Colonel" appear to be military ranks captured as author names by the importer - confirm the actual author names against the published article},
url = {https://bmcsportsscimedrehabil.biomedcentral.com/articles/10.1186/s13102-023-00796-z},
doi = {10.1186/s13102-023-00796-z},
issn = {2052-1847},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-22},
journal = {BMC Sports Sci Med Rehabil},
volume = {16},
number = {1},
pages = {16},
abstract = {The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.},
keywords = {DTIC, MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
The ability to obtain dynamic movement assessments using force plate technology holds the promise of providing more detailed knowledge of the strength, balance and forces generated by active-duty military personnel. To date, there are not well-defined use cases for implementation of force plate assessments in military training environments. We sought to determine if force plate technology assessments could provide additional insights, related to the likelihood of graduation, beyond that provided by traditional physical fitness tests (PFT’s), in an elite Marine training school. Serial force plate measures were also obtained on those Marines successfully completing training to determine if consistent measures reflecting the effects of training on muscle skeletal load-over-time could be accurately measured. A pre-training force plate assessment performed in 112 Marines did not predict graduation rates. For Marines who successfully completed the course, serial measures obtained throughout training were highly variable for each individual and no firm conclusions could be drawn related to load imposed or the fitness attained during training.
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research \& Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
internal-note = {NOTE(review): required booktitle is missing for @inproceedings - the DOI prefix 10.54941/ahfe suggests an AHFE 2024 proceedings; confirm and add},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling & simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration & Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Tak, Ala Nekouvaght; Becerik-Gerber, Burçin; Soibelman, Lucio; Lucas, Gale
A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems Journal Article
In: Building and Environment, vol. 245, pp. 110935, 2023, ISSN: 03601323.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{tak_framework_2023,
title = {A framework for investigating the acceptance of smart home technologies: Findings for residential smart HVAC systems},
author = {Ala Nekouvaght Tak and Burçin Becerik-Gerber and Lucio Soibelman and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323009629},
doi = {10.1016/j.buildenv.2023.110935},
issn = {0360-1323},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
journal = {Building and Environment},
volume = {245},
pages = {110935},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Cho, Hyundong; Liu, Shuai; Shi, Taiwei; Jain, Darpan; Rizk, Basem; Huang, Yuyang; Lu, Zixun; Wen, Nuan; Gratch, Jonathan; Ferrara, Emilio; May, Jonathan
Can Language Model Moderators Improve the Health of Online Discourse? Miscellaneous
2023, (arXiv:2311.10781 [cs]).
Abstract | Links | BibTeX | Tags: AI, Dialogue, DTIC, UARC, Virtual Humans
@misc{cho_can_2023,
title = {Can Language Model Moderators Improve the Health of Online Discourse?},
author = {Hyundong Cho and Shuai Liu and Taiwei Shi and Darpan Jain and Basem Rizk and Yuyang Huang and Zixun Lu and Nuan Wen and Jonathan Gratch and Emilio Ferrara and Jonathan May},
url = {http://arxiv.org/abs/2311.10781},
eprint = {2311.10781},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human moderation of online conversation is essential to maintaining civility and focus in a dialogue, but is challenging to scale and harmful to moderators. The inclusion of sophisticated natural language generation modules as a force multiplier aid moderators is a tantalizing prospect, but adequate evaluation approaches have so far been elusive. In this paper, we establish a systematic definition of conversational moderation effectiveness through a multidisciplinary lens that incorporates insights from social science. We then propose a comprehensive evaluation framework that uses this definition to asses models' moderation capabilities independently of human intervention. With our framework, we conduct the first known study of conversational dialogue models as moderators, finding that appropriately prompted models can provide specific and fair feedback on toxic behavior but struggle to influence users to increase their levels of respect and cooperation.},
note = {arXiv:2311.10781 [cs]},
keywords = {AI, Dialogue, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Yang, Daniel; Kommineni, Aditya; Alshehri, Mohammad; Mohanty, Nilamadhab; Modi, Vedant; Gratch, Jonathan; Narayanan, Shrikanth
Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models Miscellaneous
2023, (arXiv:2311.03551 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@misc{yang_context_2023,
title = {Context Unlocks Emotions: Text-based Emotion Classification Dataset Auditing with Large Language Models},
author = {Daniel Yang and Aditya Kommineni and Mohammad Alshehri and Nilamadhab Mohanty and Vedant Modi and Jonathan Gratch and Shrikanth Narayanan},
url = {http://arxiv.org/abs/2311.03551},
eprint = {2311.03551},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {The lack of contextual information in text data can make the annotation process of text-based emotion classification datasets challenging. As a result, such datasets often contain labels that fail to consider all the relevant emotions in the vocabulary. This misalignment between text inputs and labels can degrade the performance of machine learning models trained on top of them. As re-annotating entire datasets is a costly and time-consuming task that cannot be done at scale, we propose to use the expressive capabilities of large language models to synthesize additional context for input text to increase its alignment with the annotated emotional labels. In this work, we propose a formal definition of textual context to motivate a prompting strategy to enhance such contextual information. We provide both human and empirical evaluation to demonstrate the efficacy of the enhanced context. Our method improves alignment between inputs and their human-annotated labels from both an empirical and human-evaluated standpoint.},
note = {arXiv:2311.03551 [cs]},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Chang, Di; Shi, Yichun; Gao, Quankai; Fu, Jessica; Xu, Hongyi; Song, Guoxian; Yan, Qing; Yang, Xiao; Soleymani, Mohammad
MagicDance: Realistic Human Dance Video Generation with Motions & Facial Expressions Transfer Miscellaneous
2023, (arXiv:2311.12052 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@misc{chang_magicdance_2023,
title = {{MagicDance}: Realistic Human Dance Video Generation with Motions \& Facial Expressions Transfer},
author = {Di Chang and Yichun Shi and Quankai Gao and Jessica Fu and Hongyi Xu and Guoxian Song and Qing Yan and Xiao Yang and Mohammad Soleymani},
url = {http://arxiv.org/abs/2311.12052},
eprint = {2311.12052},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {In this work, we propose MagicDance, a diffusion-based model for 2D human motion and facial expression transfer on challenging human dance videos. Specifically, we aim to generate human dance videos of any target identity driven by novel pose sequences while keeping the identity unchanged. To this end, we propose a two-stage training strategy to disentangle human motions and appearance (e.g., facial expressions, skin tone and dressing), consisting of the pretraining of an appearance-control block and fine-tuning of an appearance-pose-joint-control block over human dance poses of the same dataset. Our novel design enables robust appearance control with temporally consistent upper body, facial attributes, and even background. The model also generalizes well on unseen human identities and complex motion sequences without the need for any fine-tuning with additional data with diverse human attributes by leveraging the prior knowledge of image diffusion models. Moreover, the proposed model is easy to use and can be considered as a plug-in module/extension to Stable Diffusion. We also demonstrate the model's ability for zero-shot 2D animation generation, enabling not only the appearance transfer from one identity to another but also allowing for cartoon-like stylization given only pose inputs. Extensive experiments demonstrate our superior performance on the TikTok dataset.},
note = {arXiv:2311.12052 [cs]},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Timothy S.; Gordon, Andrew S.
Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5 Book Section
In: Holloway-Attaway, Lissa; Murray, John T. (Ed.): Interactive Storytelling, vol. 14384, pp. 297–305, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-47657-0 978-3-031-47658-7, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Narrative, UARC
@incollection{holloway-attaway_playing_2023,
title = {Playing Story Creation Games with Large Language Models: Experiments with {GPT-3.5}},
author = {Timothy S. Wang and Andrew S. Gordon},
editor = {Lissa Holloway-Attaway and John T. Murray},
url = {https://link.springer.com/10.1007/978-3-031-47658-7_28},
doi = {10.1007/978-3-031-47658-7_28},
isbn = {978-3-031-47657-0 978-3-031-47658-7},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Interactive Storytelling},
series = {Lecture Notes in Computer Science},
volume = {14384},
pages = {297--305},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, Narrative, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Hudson, Taylor; Artstein, Ron; Voss, Clare; Traum, David
Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release Miscellaneous
2023, (arXiv:2310.17568 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Natural Language, UARC
@misc{lukin_navigating_2023,
title = {Navigating to Success in Multi-Modal Human-Robot Collaboration: Analysis and Corpus Release},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Taylor Hudson and Ron Artstein and Clare Voss and David Traum},
url = {http://arxiv.org/abs/2310.17568},
eprint = {2310.17568},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Human-guided robotic exploration is a useful approach to gathering information at remote locations, especially those that might be too risky, inhospitable, or inaccessible for humans. Maintaining common ground between the remotely-located partners is a challenge, one that can be facilitated by multi-modal communication. In this paper, we explore how participants utilized multiple modalities to investigate a remote location with the help of a robotic partner. Participants issued spoken natural language instructions and received from the robot: text-based feedback, continuous 2D LIDAR mapping, and upon-request static photographs. We noticed that different strategies were adopted in terms of use of the modalities, and hypothesize that these differences may be correlated with success at several exploration sub-tasks. We found that requesting photos may have improved the identification and counting of some key entities (doorways in particular) and that this strategy did not hinder the amount of overall area exploration. Future work with larger samples may reveal the effects of more nuanced photo and dialogue strategies, which can inform the training of robotic agents. Additionally, we announce the release of our unique multi-modal corpus of human-robot communication in an exploration context: SCOUT, the Situated Corpus on Understanding Transactions.},
note = {arXiv:2310.17568 [cs]},
keywords = {DTIC, Natural Language, UARC},
pubstate = {published},
tppubtype = {misc}
}
Gilani, Setareh Nasihati; Pollard, Kimberly; Traum, David
Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions Proceedings Article
In: International Conference on Multimodal Interaction, pp. 71–75, ACM, Paris, France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: DTIC, Natural Language, UARC
@inproceedings{nasihati_gilani_multimodal_2023,
title = {Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions},
author = {Setareh Nasihati Gilani and Kimberly Pollard and David Traum},
url = {https://dl.acm.org/doi/10.1145/3610661.3617166},
doi = {10.1145/3610661.3617166},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {71--75},
publisher = {ACM},
address = {Paris, France},
keywords = {DTIC, Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators Journal Article
In: Sensors, vol. 23, no. 21, pp. 8694, 2023, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC, Virtual Humans
@article{awada_predicting_2023,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
  title     = {Predicting Office Workers’ Productivity: A Machine Learning Approach Integrating Physiological, Behavioral, and Psychological Indicators},
  journal   = {Sensors},
  volume    = {23},
  number    = {21},
  pages     = {8694},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  doi       = {10.3390/s23218694},
  issn      = {1424-8220},
  url       = {https://www.mdpi.com/1424-8220/23/21/8694},
  abstract  = {This research pioneers the application of a machine learning framework to predict the perceived productivity of office workers using physiological, behavioral, and psychological features. Two approaches were compared: the baseline model, predicting productivity based on physiological and behavioral characteristics, and the extended model, incorporating predictions of psychological states such as stress, eustress, distress, and mood. Various machine learning models were utilized and compared to assess their predictive accuracy for psychological states and productivity, with XGBoost emerging as the top performer. The extended model outperformed the baseline model, achieving an R2 of 0.60 and a lower MAE of 10.52, compared to the baseline model’s R2 of 0.48 and MAE of 16.62. The extended model’s feature importance analysis revealed valuable insights into the key predictors of productivity, shedding light on the role of psychological states in the prediction process. Notably, mood and eustress emerged as significant predictors of productivity. Physiological and behavioral features, including skin temperature, electrodermal activity, facial movements, and wrist acceleration, were also identified. Lastly, a comparative analysis revealed that wearable devices (Empatica E4 and H10 Polar) outperformed workstation addons (Kinect camera and computer-usage monitoring application) in predicting productivity, emphasizing the potential utility of wearable devices as an independent tool for assessment of productivity. Implementing the model within smart workstations allows for adaptable environments that boost productivity and overall well-being among office workers.},
  keywords  = {DTIC, Machine Learning, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Chawla, Kushal; Wu, Ian; Rong, Yu; Lucas, Gale M.; Gratch, Jonathan
Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions Miscellaneous
2023, (arXiv:2310.14404 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, DTIC, UARC, Virtual Humans
@misc{chawla_be_2023,
title = {Be Selfish, But Wisely: Investigating the Impact of Agent Personality in Mixed-Motive Human-Agent Interactions},
author = {Kushal Chawla and Ian Wu and Yu Rong and Gale M. Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2310.14404},
eprint = {2310.14404},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {A natural way to design a negotiation dialogue system is via self-play RL: train an agent that learns to maximize its performance by interacting with a simulated user that has been designed to imitate human-human dialogue data. Although this procedure has been adopted in prior work, we find that it results in a fundamentally flawed system that fails to learn the value of compromise in a negotiation, which can often lead to no agreements (i.e., the partner walking away without a deal), ultimately hurting the model's overall performance. We investigate this observation in the context of the DealOrNoDeal task, a multi-issue negotiation over books, hats, and balls. Grounded in negotiation theory from Economics, we modify the training procedure in two novel ways to design agents with diverse personalities and analyze their performance with human partners. We find that although both techniques show promise, a selfish agent, which maximizes its own performance while also avoiding walkaways, performs superior to other variants by implicitly learning to generate value for both itself and the negotiation partner. We discuss the implications of our findings for what it means to be a successful negotiation dialogue system and how these systems should be designed in the future.},
note = {arXiv:2310.14404 [cs]},
keywords = {Dialogue, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn; Liu, Ruying
A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–15, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: DTIC, Machine Learning, UARC
@article{awada_new_2023,
title = {A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll and Ruying Liu},
url = {https://ieeexplore.ieee.org/document/10286408/},
doi = {10.1109/TAFFC.2023.3324910},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
journal = {IEEE Transactions on Affective Computing},
pages = {1--15},
keywords = {DTIC, Machine Learning, UARC},
pubstate = {published},
tppubtype = {article}
}
Prinzing, Michael; Garton, Catherine; Berman, Catherine J.; Zhou, Jieni; West, Taylor Nicole; Gratch, Jonathan; Fredrickson, Barbara
Can AI Agents Help Humans to Connect? Technical Report
PsyArXiv 2023.
Abstract | Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@techreport{prinzing_can_2023,
  author      = {Michael Prinzing and Catherine Garton and Catherine J. Berman and Jieni Zhou and Taylor Nicole West and Jonathan Gratch and Barbara Fredrickson},
  title       = {Can AI Agents Help Humans to Connect?},
  institution = {PsyArXiv},
  year        = {2023},
  date        = {2023-10-01},
  urldate     = {2023-12-07},
  doi         = {10.31234/osf.io/muq6s},
  url         = {https://osf.io/muq6s},
  abstract    = {This paper reports on a pre-registered experiment designed to test whether artificial agents can help people to create more moments of high-quality connection with other humans. Of four pre-registered hypotheses, we found (partial) support for only one.},
  keywords    = {AI, DTIC, UARC, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{lin_toward_2023,
title = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
author = {Eleanor Lin and James Hale and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3565287.3617637},
doi = {10.1145/3565287.3617637},
isbn = {978-1-4503-9926-5},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
pages = {545--550},
publisher = {ACM},
address = {Washington DC USA},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
Privacy-preserving Representation Learning for Speech Understanding Miscellaneous
2023, (arXiv:2310.17194 [eess]).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@misc{tran_privacy-preserving_2023,
title = {Privacy-preserving Representation Learning for Speech Understanding},
author = {Minh Tran and Mohammad Soleymani},
url = {http://arxiv.org/abs/2310.17194},
eprint = {2310.17194},
eprinttype = {arXiv},
eprintclass = {eess},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
publisher = {arXiv},
abstract = {Existing privacy-preserving speech representation learning methods target a single application domain. In this paper, we present a novel framework to anonymize utterance-level speech embeddings generated by pre-trained encoders and show its effectiveness for a range of speech classification tasks. Specifically, given the representations from a pre-trained encoder, we train a Transformer to estimate the representations for the same utterances spoken by other speakers. During inference, the extracted representations can be converted into different identities to preserve privacy. We compare the results with the voice anonymization baselines from the VoicePrivacy 2022 challenge. We evaluate our framework on speaker identification for privacy and emotion recognition, depression classification, and intent classification for utility. Our method outperforms the baselines on privacy and utility in paralinguistic tasks and achieves comparable performance for intent classification.},
note = {arXiv:2310.17194 [eess]},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{ahmed_asar_2023,
title = {ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors},
author = {Tamim Ahmed and Thanassis Rikakis and Aisling Kelliher and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617154},
doi = {10.1145/3610661.3617154},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {11--15},
publisher = {ACM},
address = {Paris, France},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: AI, UARC, Virtual Humans
@inproceedings{andrist_platform_2023,
title = {Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research},
author = {Sean Andrist and Dan Bohus and Zongjian Li and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3610661.3617603},
doi = {10.1145/3610661.3617603},
isbn = {9798400703218},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {105--106},
publisher = {ACM},
address = {Paris, France},
keywords = {AI, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{tran_multimodal_2023,
title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
doi = {10.1145/3577190.3614105},
isbn = {9798400700552},
year = {2023},
date = {2023-10-01},
urldate = {2023-12-07},
booktitle = {International Conference on Multimodal Interaction},
pages = {406--415},
publisher = {ACM},
address = {Paris, France},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Liu, Rong; Zhao, Enyu; Liu, Zhiyuan; Feng, Andrew; Easley, Scott John
Instant Photorealistic Style Transfer: A Lightweight and Adaptive Approach Miscellaneous
2023, (arXiv:2309.10011 [cs, eess]).
Abstract | Links | BibTeX | Tags: DTIC, UARC
@misc{liu_instant_2023,
title = {Instant Photorealistic Style Transfer: A Lightweight and Adaptive Approach},
author = {Rong Liu and Enyu Zhao and Zhiyuan Liu and Andrew Feng and Scott John Easley},
url = {http://arxiv.org/abs/2309.10011},
eprint = {2309.10011},
eprinttype = {arXiv},
year = {2023},
date = {2023-10-01},
urldate = {2024-05-14},
publisher = {arXiv},
abstract = {In this paper, we propose an Instant Photorealistic Style Transfer (IPST) approach, designed to achieve instant photorealistic style transfer on super-resolution inputs without the need for pre-training on pair-wise datasets or imposing extra constraints. Our method utilizes a lightweight StyleNet to enable style transfer from a style image to a content image while preserving non-color information. To further enhance the style transfer process, we introduce an instance-adaptive optimization to prioritize the photorealism of outputs and accelerate the convergence of the style network, leading to a rapid training completion within seconds. Moreover, IPST is well-suited for multi-frame style transfer tasks, as it retains temporal and multi-view consistency of the multi-frame inputs such as video and Neural Radiance Field (NeRF). Experimental results demonstrate that IPST requires less GPU memory usage, offers faster multi-frame transfer speed, and generates photorealistic outputs, making it a promising solution for various photorealistic transfer applications.},
note = {arXiv:2309.10011 [cs, eess]},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {misc}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: DTIC, MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1--2},
publisher = {ACM},
address = {Würzburg, Germany},
keywords = {DTIC, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kappas, Arvid; Gratch, Jonathan
These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI Journal Article
In: Affective Science, 2023, ISSN: 2662-2041, 2662-205X.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{kappas_these_2023,
title = {These Aren’t The Droids You Are Looking for: Promises and Challenges for the Intersection of Affective Science and Robotics/AI},
author = {Arvid Kappas and Jonathan Gratch},
url = {https://link.springer.com/10.1007/s42761-023-00211-3},
doi = {10.1007/s42761-023-00211-3},
issn = {2662-2041, 2662-205X},
year = {2023},
date = {2023-08-01},
urldate = {2023-09-20},
journal = {Affective Science},
abstract = {AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
AI research focused on interactions with humans, particularly in the form of robots or virtual agents, has expanded in the last two decades to include concepts related to affective processes. Affective computing is an emerging field that deals with issues such as how the diagnosis of affective states of users can be used to improve such interactions, also with a view to demonstrate affective behavior towards the user. This type of research often is based on two beliefs: (1) artificial emotional intelligence will improve human computer interaction (or more specifically human robot interaction), and (2) we understand the role of affective behavior in human interaction sufficiently to tell artificial systems what to do. However, within affective science the focus of research is often to test a particular assumption, such as “smiles affect liking.” Such focus does not provide the information necessary to synthesize affective behavior in long dynamic and real-time interactions. In consequence, theories do not play a large role in the development of artificial affective systems by engineers, but self-learning systems develop their behavior out of large corpora of recorded interactions. The status quo is characterized by measurement issues, theoretical lacunae regarding prevalence and functions of affective behavior in interaction, and underpowered studies that cannot provide the solid empirical foundation for further theoretical developments. This contribution will highlight some of these challenges and point towards next steps to create a rapprochement between engineers and affective scientists with a view to improving theory and solid applications.
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636--640},
publisher = {ISCA},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 09257535.
Links | BibTeX | Tags: DTIC, Simulation, UARC, virtual reality
@article{liu_effectiveness_2023,
title = {Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
doi = {10.1016/j.ssci.2023.106175},
issn = {0925-7535},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-22},
journal = {Safety Science},
volume = {164},
pages = {106175},
keywords = {DTIC, Simulation, UARC, virtual reality},
pubstate = {published},
tppubtype = {article}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-08-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D; Mee, Dillon; Core, Mark G
Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns Proceedings Article
In: 2023.
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences, UARC
@inproceedings{nye_generative_2023,
title = {Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns},
author = {Benjamin D Nye and Dillon Mee and Mark G Core},
url = {https://ceur-ws.org/Vol-3487/paper4.pdf},
year = {2023},
date = {2023-07-01},
abstract = {After many years of relatively limited capabilities for generative language models, recent large language models (LLM’s) have demonstrated qualitatively better capabilities for understanding, synthesis, and inference on text. Due to the prominence of ChatGPT’s chat system, both the media and many educational developers have suggested using generative AI to directly tutor students. However, despite surface-level similarity between ChatGPT interactions and tutoring dialogs, generative AI has other strengths which may be substantially more relevant for intelligent tutoring (e.g., detecting misconceptions, improved language translation, content generation) and weaknesses that make it problematic for on-the-fly tutoring (e.g., hallucinations, lack of pedagogical training data). In this paper, we discuss how we are approaching generative LLM’s for tutoring dialogs, for problems such as multi- concept short answer grading and semi-supervised interactive content generation. This work shows interesting opportunities for prompt engineering approaches for short-answer classification, despite sometimes quirky behavior. The time savings for high-quality content generation for tutoring is not yet clear and further research is needed. The paper concludes with a consideration of longer-term equity and access in a world where essential capabilities require low-latency real-time connections to large, pay-peruse models. Risks and mitigating technologies for this kind of “AI digital divide” are discussed, including optimized / edge-computing LLM’s and using generative AI models as simulated students to train specialized tutoring models.},
internal-note = {Review: required booktitle is missing for this @inproceedings entry; URL points to CEUR-WS Vol-3487 — confirm the workshop proceedings title and add booktitle},
keywords = {DTIC, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Okado, Yuko; Shiel, Aaron; Carr, Kayla; Rosenberg, Milton; Rice, Enora; Ostrander, Luke; Ju, Megan; Gutierrez, Cassandra; Ramirez, Dilan; Auerbach, Daniel; Aguirre, Angelica; Swartout, William
MentorStudio: Amplifying diverse voices through rapid, self-authorable virtual mentors Proceedings Article
In: 2023, (Publisher: Zenodo).
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Agents
@inproceedings{nye_mentorstudio_2023,
title = {MentorStudio: Amplifying diverse voices through rapid, self-authorable virtual mentors},
author = {Benjamin D. Nye and Yuko Okado and Aaron Shiel and Kayla Carr and Milton Rosenberg and Enora Rice and Luke Ostrander and Megan Ju and Cassandra Gutierrez and Dilan Ramirez and Daniel Auerbach and Angelica Aguirre and William Swartout},
url = {https://zenodo.org/record/8226275},
doi = {10.5281/ZENODO.8226275},
year = {2023},
date = {2023-07-01},
urldate = {2024-01-11},
abstract = {Mentoring promotes underserved students' STEM persistence but it is difficult to scale up. Virtual agents can amplify mentors' experiences to larger audiences, which is particularly important for mentors from under-represented backgrounds and for underserved students with less access to mentors. This paper introduces MentorStudio, an online platform that allows real-life mentors to self-record and publish video-based conversational virtual agents. MentorStudio's goals are to increase speed, scheduling flexibility, and autonomy in creating intelligent virtual mentors. MentorStudio platform components are introduced, along with initial feedback regarding usability and acceptance collected from 20 STEM mentors who recorded virtual mentors. Overall, the MentorStudio platform has good ease-of-use and acceptance among mentors and offers a platform capable of recording large number of mentors to expand their reach to an unlimited number of students.},
note = {Publisher: Zenodo},
internal-note = {Review: required booktitle is missing for this @inproceedings entry; only a Zenodo deposit is referenced — confirm the conference/proceedings title and add booktitle},
keywords = {DTIC, Learning Sciences, UARC, Virtual Agents},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-07-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718 Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{tak_is_2023,
title = {Is GPT a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
eprint = {2307.13779},
eprinttype = {arXiv},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv Version Number: 1},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-20},
journal = {IEEE Transactions on Affective Computing},
pages = {1--14},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Saxon, Leslie; Boberg, Jill; Faulk, Robert; Barrett, Trevor
Identifying relationships between compression garments and recovery in a military training environment Technical Report
In Review 2023.
Abstract | Links | BibTeX | Tags: CBC, DTIC, UARC
@techreport{saxon_identifying_2023,
title = {Identifying relationships between compression garments and recovery in a military training environment},
author = {Leslie Saxon and Jill Boberg and Robert Faulk and Trevor Barrett},
url = {https://www.researchsquare.com/article/rs-3193173/v1},
doi = {10.21203/rs.3.rs-3193173/v1},
year = {2023},
date = {2023-07-01},
urldate = {2023-09-21},
institution = {In Review},
abstract = {Development and maintenance of physical capabilities is an essential part of combat readiness in the military. This readiness requires continuous training and is therefore compromised by injury. Because Service Members (SMs) must be physically and cognitively prepared to conduct multifaceted operations in support of strategic objectives, and because the Department of Defense’s (DoD) non-deployable rate and annual costs associated with treating SMs continue to rise at an alarming rate, finding a far-reaching and efficient solution to prevent such injuries is a high priority. Compression garments (CGs) have become increasingly popular over the past decade in human performance applications, and reportedly facilitate post-exercise recovery by reducing muscle soreness, increasing blood lactate removal, and increasing perception of recovery, but the evidence is mixed, at best. In the current study we explored whether CG use, and duration of use, improves recovery and mitigates muscle soreness effectively in an elite Marine training course. In order to test this, we subjected Service Members to fatiguing exercise and then measured subjective and objective recovery and soreness using participant reports and grip and leg strength over a 72-hour recovery period. Findings from this study suggest that wearing CGs for post training recovery showed significant and moderate positive effects on subjective soreness, fatigue, and perceived level of recovery. We did not find statistically significant effects on physical performance while testing grip or leg strength. These findings suggest that CG may be a beneficial strategy for military training environments to accelerate muscle recovery after high-intensity exercise, without adverse effects to the wearer or negative impact on military training.},
keywords = {CBC, DTIC, UARC},
pubstate = {published},
tppubtype = {techreport}
}
Development and maintenance of physical capabilities is an essential part of combat readiness in the military. This readiness requires continuous training and is therefore compromised by injury. Because Service Members (SMs) must be physically and cognitively prepared to conduct multifaceted operations in support of strategic objectives, and because the Department of Defense’s (DoD) non-deployable rate and annual costs associated with treating SMs continue to rise at an alarming rate, finding a far-reaching and efficient solution to prevent such injuries is a high priority. Compression garments (CGs) have become increasingly popular over the past decade in human performance applications, and reportedly facilitate post-exercise recovery by reducing muscle soreness, increasing blood lactate removal, and increasing perception of recovery, but the evidence is mixed, at best. In the current study we explored whether CG use, and duration of use, improves recovery and mitigates muscle soreness effectively in an elite Marine training course. In order to test this, we subjected Service Members to fatiguing exercise and then measured subjective and objective recovery and soreness using participant reports and grip and leg strength over a 72-hour recovery period. Findings from this study suggest that wearing CGs for post training recovery showed significant and moderate positive effects on subjective soreness, fatigue, and perceived level of recovery. We did not find statistically significant effects on physical performance while testing grip or leg strength. These findings suggest that CG may be a beneficial strategy for military training environments to accelerate muscle recovery after high-intensity exercise, without adverse effects to the wearer or negative impact on military training.
Rodrigues, Patrick B.; Singh, Rashmi; Oytun, Mert; Adami, Pooya; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale M.
A multidimensional taxonomy for human-robot interaction in construction Journal Article
In: Automation in Construction, vol. 150, pp. 104845, 2023, ISSN: 0926-5805.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{rodrigues_multidimensional_2023,
  title     = {A multidimensional taxonomy for human-robot interaction in construction},
  author    = {Patrick B. Rodrigues and Rashmi Singh and Mert Oytun and Pooya Adami and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale M. Lucas},
  url       = {https://www.sciencedirect.com/science/article/pii/S092658052300105X},
  doi       = {10.1016/j.autcon.2023.104845},
  issn      = {0926-5805},
  year      = {2023},
  date      = {2023-06-01},
  urldate   = {2023-03-31},
  journal   = {Automation in Construction},
  volume    = {150},
  pages     = {104845},
  abstract  = {Despite the increased interest in construction robotics both in academia and the industry, insufficient attention has been given to aspects related to Human-Robot Interaction (HRI). Characterizing HRI for construction tasks can help researchers organize knowledge in a structured manner that allows for classifying construction robotics applications and comparing and benchmarking different studies. This paper builds upon existing taxonomies and empirical studies in HRI in various industries (e.g., construction, manufacturing, and military, among others) to propose a multidimensional taxonomy to characterize HRI applications in the construction industry. The taxonomy design followed a systematic literature review in which common themes were identified and grouped into 16 categories. The proposed taxonomy can be used as a foundation for systematic reviews and meta-analyses of HRI applications in construction and can benefit the construction industry by informing the design of collaborative tasks performed by human-robot teams.},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-06-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
volume = {1831},
pages = {530--535},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
The Role of Heuristics and Biases during Complex Choices with an AI Teammate Journal Article
In: AAAI, vol. 37, no. 5, pp. 5993–6001, 2023, ISSN: 2374-3468, 2159-5399.
Abstract | Links | BibTeX | Tags: AI, DTIC, Social Simulation, UARC
@article{gurney_role_2023,
title = {The Role of Heuristics and Biases during Complex Choices with an AI Teammate},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/25741},
doi = {10.1609/aaai.v37i5.25741},
issn = {2374-3468, 2159-5399},
year = {2023},
date = {2023-06-01},
urldate = {2023-12-08},
journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
volume = {37},
number = {5},
pages = {5993--6001},
abstract = {Behavioral scientists have classically documented aversion to algorithmic decision aids, from simple linear models to AI. Sentiment, however, is changing and possibly accelerating AI helper usage. AI assistance is, arguably, most valuable when humans must make complex choices. We argue that classic experimental methods used to study heuristics and biases are insufficient for studying complex choices made with AI helpers. We adapted an experimental paradigm designed for studying complex choices in such contexts. We show that framing and anchoring effects impact how people work with an AI helper and are predictive of choice outcomes. The evidence suggests that some participants, particularly those in a loss frame, put too much faith in the AI helper and experienced worse choice outcomes by doing so. The paradigm also generates computational modeling-friendly data allowing future studies of human-AI decision making.},
keywords = {AI, DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
volume = {13916},
pages = {189--201},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{tran_speech_2023,
title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10095173/},
doi = {10.1109/ICASSP49357.2023.10095173},
isbn = {978-1-72816-327-7},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
publisher = {IEEE},
address = {Rhodes Island, Greece},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}