Publications
Search
Klumpe, Stella; Mitchell, Kelsey C.; Cox, Emma; Katz, Jeffrey S.; Lazarowski, Lucia; Deshpande, Gopikrishna; Gratch, Jonathan; Visser, Ewart J. De; Ayaz, Hasan; Li, Xingnan; Franke, Adrian A.; Krueger, Frank
Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions Journal Article
In: PLoS One, vol. 20, no. 6, pp. e0324312, 2025, ISSN: 1932-6203.
@article{klumpe_social_2025,
title = {Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions},
author = {Stella Klumpe and Kelsey C. Mitchell and Emma Cox and Jeffrey S. Katz and Lucia Lazarowski and Gopikrishna Deshpande and Jonathan Gratch and Ewart J. {De Visser} and Hasan Ayaz and Xingnan Li and Adrian A. Franke and Frank Krueger},
editor = {Casey R. Lynch},
url = {https://dx.plos.org/10.1371/journal.pone.0324312},
doi = {10.1371/journal.pone.0324312},
issn = {1932-6203},
year = {2025},
date = {2025-06-01},
urldate = {2025-06-12},
journal = {PLoS One},
volume = {20},
number = {6},
pages = {e0324312},
abstract = {In the evolving landscape of technology, robots have emerged as social companions, prompting an investigation into social bonding between humans and robots. While human-animal interactions are well-studied, human-robot interactions (HRI) remain comparatively underexplored. Ethorobotics, a field of social robotic engineering based on ecology and ethology, suggests designing companion robots modeled on animal companions, which are simpler to emulate than humans. However, it is unclear whether these robots can match the social companionship provided by their original models. This study examined social bonding between humans and AIBOs, dog-inspired companion robots, compared to real dogs. Nineteen female participants engaged in 12 affiliative interactions with dogs and AIBOs across two counter-balanced, one-month bonding phases. Social bonding was assessed through urinary oxytocin (OXT) level change over an interaction, self-reported attachment using an adapted version of the Lexington Attachment to Pets Scale, and social companionship evaluations administering the Robot-Dog Questionnaire. To examine OXT level changes and self-reported attachment by comparing the two social companions, we conducted mixed-effects model analyses and planned follow-up comparisons. Frequency comparison, binary logistic regression, and thematic analysis were performed to analyze social companionship evaluations. Results revealed significant differences between dogs and AIBOs in fostering social bonds. OXT level change increased during interactions with dogs but decreased with AIBOs. Participants reported stronger attachment to dogs and rated them as better social companions. These findings highlight the current limitations of AIBOs in fostering social bonding immediately compared to dogs. Our study contributes to the growing HRI research by demonstrating an existing gap between AIBOs and dogs as social companions. It highlights the need for further investigation to understand the complexities of social bonding with companion robots, which is essential to implement successful applications for social robots in diverse domains such as the elderly and health care, education, and entertainment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices Journal Article
In: Journal of Choice Modelling, vol. 55, pp. 100549, 2025, ISSN: 1755-5345.
@article{gurney_exploring_2025,
title = {Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1755534525000120},
doi = {10.1016/j.jocm.2025.100549},
issn = {1755-5345},
year = {2025},
date = {2025-06-01},
urldate = {2025-04-15},
journal = {Journal of Choice Modelling},
volume = {55},
pages = {100549},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew
Logical Abduction as a Computational Model of Narrative Proceedings Article
In: Geneva, Switzerland, 2025.
@inproceedings{gordon_andrew_logical_2025,
title = {Logical Abduction as a Computational Model of Narrative},
author = {Andrew Gordon},
url = {https://asgordon.github.io/publications/CMN2025.PDF},
year = {2025},
date = {2025-05-01},
address = {Geneva, Switzerland},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration Journal Article
In: Journal of Environmental Psychology, pp. 102628, 2025, ISSN: 0272-4944.
@article{awada_impact_2025,
title = {The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration},
author = {Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494425001112},
doi = {10.1016/j.jenvp.2025.102628},
issn = {0272-4944},
year = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {Journal of Environmental Psychology},
pages = {102628},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Wang, Ning; Fu, Boxi; Dincer, Betul; Masur, Omkar; Faizi, David; Ravindran, Harshul; Wang, Julia; Lai, Devashish; Merchant, Chirag
Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners Book Section
In: Smith, Brian K.; Borge, Marcela (Ed.): Learning and Collaboration Technologies, vol. 15808, pp. 69–79, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-93745-3 978-3-031-93746-0, (Series Title: Lecture Notes in Computer Science).
@incollection{smith_becoming_2025,
title = {Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners},
author = {Ning Wang and Boxi Fu and Betul Dincer and Omkar Masur and David Faizi and Harshul Ravindran and Julia Wang and Devashish Lai and Chirag Merchant},
editor = {Brian K. Smith and Marcela Borge},
url = {https://link.springer.com/10.1007/978-3-031-93746-0_6},
doi = {10.1007/978-3-031-93746-0_6},
isbn = {978-3-031-93745-3, 978-3-031-93746-0},
year = {2025},
date = {2025-05-01},
urldate = {2025-06-12},
booktitle = {Learning and Collaboration Technologies},
volume = {15808},
pages = {69--79},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Core, Mark; Nye, Benjamin; Carr, Kayla; Li, Shirley; Shiel, Aaron; Auerbach, Daniel; Leeds, Andrew; Swartout, William
Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling Journal Article
In: FLAIRS, vol. 38, 2025, ISSN: 2334-0762, 2334-0754.
@article{core_usability_2025,
  author    = {Mark Core and Benjamin Nye and Kayla Carr and Shirley Li and Aaron Shiel and Daniel Auerbach and Andrew Leeds and William Swartout},
  title     = {Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling},
  journal   = {FLAIRS},
  volume    = {38},
  year      = {2025},
  date      = {2025-05-01},
  urldate   = {2025-05-20},
  issn      = {2334-0762, 2334-0754},
  url       = {https://journals.flvc.org/FLAIRS/article/view/138996},
  doi       = {10.32473/flairs.38.1.138996},
  abstract  = {As AI tools become common across jobs and industries, it is critical to broaden education about AI beyond teaching computer scientists how to build AI systems. To expand AI education, we are researching AI for AI learning: a personalized and adaptive learning system that integrates dialog-based tutoring and gamified programming activities. To study this problem, we adapted and expanded an existing smartphone adaptive coach to develop the Game-if-AI system. Using a design-based research approach, Game-if-AI was iteratively tested and improved across four semesters of optional use in a course designed for technician-level understanding of AI: mastering programming skills to apply AI libraries and established models. In this study, we measured the interests and needs of these technical learners, based on both survey data and on how they engaged with topics in the system. Based on this data, new topics were added and the system was refined. In this paper, we report students' usability ratings for system components and student preferences based on completion rates of AI topics available each semester. Students rated the adaptive system positively overall (93% rated as a "good idea"), but more complex learning activities (tutoring dialogs, programming) were rated lower than traditional ones (e.g., multiple choice, reading). Students were most likely to master topics highly aligned to the course materials, as well as self-directed learning toward easier high-interest topics (e.g., LLM Prompting).},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Journal Article
In: Int J Artif Intell Educ, 2025, ISSN: 1560-4292, 1560-4306.
@article{okado_how_2025,
title = {How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
url = {https://link.springer.com/10.1007/s40593-025-00482-w},
doi = {10.1007/s40593-025-00482-w},
issn = {1560-4292, 1560-4306},
year = {2025},
date = {2025-05-01},
urldate = {2025-06-24},
journal = {International Journal of Artificial Intelligence in Education},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Lin, Spencer; Jun, Miru; Rizk, Basem; Shieh, Karen; Fisher, Scott; Mozgai, Sharon
Optimizing SIA Development: A Case Study in User-Centered Design for Estuary, a Multimodal Socially Interactive Agent Framework Proceedings Article
In: Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–9, 2025, (arXiv:2504.14427 [cs]).
@inproceedings{lin_optimizing_2025,
title = {Optimizing SIA Development: A Case Study in User-Centered Design for Estuary, a Multimodal Socially Interactive Agent Framework},
author = {Spencer Lin and Miru Jun and Basem Rizk and Karen Shieh and Scott Fisher and Sharon Mozgai},
url = {http://arxiv.org/abs/2504.14427},
doi = {10.1145/3706599.3707399},
eprint = {2504.14427},
eprinttype = {arXiv},
year = {2025},
date = {2025-04-01},
urldate = {2025-05-20},
booktitle = {Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1--9},
abstract = {This case study presents our user-centered design model for Socially Intelligent Agent (SIA) development frameworks through our experience developing Estuary, an open source multimodal framework for building low-latency real-time socially interactive agents. We leverage the Rapid Assessment Process (RAP) to collect the thoughts of leading researchers in the field of SIAs regarding the current state of the art for SIA development as well as their evaluation of how well Estuary may potentially address current research gaps. We achieve this through a series of end-user interviews conducted by a fellow researcher in the community. We hope that the findings of our work will not only assist the continued development of Estuary but also guide the development of other future frameworks and technologies for SIAs.},
note = {arXiv:2504.14427 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Hale, James; Rakshit, Sushrita; Chawla, Kushal; Brett, Jeanne M.; Gratch, Jonathan
KODIS: A Multicultural Dispute Resolution Dialogue Corpus Miscellaneous
2025, (arXiv:2504.12723 [cs]).
@misc{hale_kodis_2025,
title = {KODIS: A Multicultural Dispute Resolution Dialogue Corpus},
author = {James Hale and Sushrita Rakshit and Kushal Chawla and Jeanne M. Brett and Jonathan Gratch},
url = {http://arxiv.org/abs/2504.12723},
doi = {10.48550/arXiv.2504.12723},
eprint = {2504.12723},
eprinttype = {arXiv},
year = {2025},
date = {2025-04-01},
urldate = {2025-05-20},
publisher = {arXiv},
abstract = {We present KODIS, a dyadic dispute resolution corpus containing thousands of dialogues from over 75 countries. Motivated by a theoretical model of culture and conflict, participants engage in a typical customer service dispute designed by experts to evoke strong emotions and conflict. The corpus contains a rich set of dispositional, process, and outcome measures. The initial analysis supports theories of how anger expressions lead to escalatory spirals and highlights cultural differences in emotional expression. We make this corpus and data collection framework available to the community.},
note = {arXiv:2504.12723 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
}
Chaubey, Ashutosh; Guan, Xulang; Soleymani, Mohammad
Face-LLaVA: Facial Expression and Attribute Understanding through Instruction Tuning Miscellaneous
2025, (Version Number: 1).
@misc{chaubey_face-llava_2025,
title = {Face-LLaVA: Facial Expression and Attribute Understanding through Instruction Tuning},
author = {Ashutosh Chaubey and Xulang Guan and Mohammad Soleymani},
url = {https://arxiv.org/abs/2504.07198},
doi = {10.48550/ARXIV.2504.07198},
eprint = {2504.07198},
eprinttype = {arXiv},
year = {2025},
date = {2025-04-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {The human face plays a central role in social communication, necessitating the use of performant computer vision tools for human-centered applications. We propose Face-LLaVA, a multimodal large language model for face-centered, in-context learning, including facial expression and attribute recognition. Additionally, Face-LLaVA is able to generate natural language descriptions that can be used for reasoning. Leveraging existing visual databases, we first developed FaceInstruct-1M, a face-centered database for instruction tuning MLLMs for face processing. We then developed a novel face-specific visual encoder powered by Face-Region Guided Cross-Attention that integrates face geometry with local visual features. We evaluated the proposed method across nine different datasets and five different face processing tasks, including facial expression recognition, action unit detection, facial attribute detection, age estimation and deepfake detection. Face-LLaVA achieves superior results compared to existing open-source MLLMs and competitive performance compared to commercial solutions. Our model output also receives a higher reasoning rating by GPT under a zero-shot setting across all the tasks. Both our dataset and model wil be released at https://face-llava.github.io to support future advancements in social AI and foundational vision-language research.},
note = {Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
}
Fonseca, Henrique Correia Da; Melo, Celso M. De; Terada, Kazunori; Gratch, Jonathan; Paiva, Ana S.; Santos, Francisco C.
Evolution of indirect reciprocity under emotion expression Journal Article
In: Sci Rep, vol. 15, no. 1, pp. 9151, 2025, ISSN: 2045-2322.
@article{correia_da_fonseca_evolution_2025,
title = {Evolution of indirect reciprocity under emotion expression},
author = {Henrique {Correia Da Fonseca} and Celso M. {De Melo} and Kazunori Terada and Jonathan Gratch and Ana S. Paiva and Francisco C. Santos},
url = {https://www.nature.com/articles/s41598-025-89588-8},
doi = {10.1038/s41598-025-89588-8},
issn = {2045-2322},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-20},
journal = {Sci Rep},
volume = {15},
number = {1},
pages = {9151},
abstract = {Do emotion expressions impact the evolution of cooperation? Indirect Reciprocity offers a solution to the cooperation dilemma with prior work focusing on the role of social norms in propagating others’ reputations and contributing to evolutionarily stable cooperation. Recent experimental studies, however, show that emotion expressions shape pro-social behaviour, communicate one’s intentions to others, and serve an error-correcting function; yet, the role of emotion signals in the evolution of cooperation remains unexplored. We present the first model of IR based on evolutionary game theory that exposes how emotion expressions positively influence the evolution of cooperation, particularly in scenarios of frequent errors. Our findings provide evolutionary support for the existence of emotion-based social norms, which help foster cooperation among unrelated individuals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Do emotion expressions impact the evolution of cooperation? Indirect Reciprocity offers a solution to the cooperation dilemma with prior work focusing on the role of social norms in propagating others’ reputations and contributing to evolutionarily stable cooperation. Recent experimental studies, however, show that emotion expressions shape pro-social behaviour, communicate one’s intentions to others, and serve an error-correcting function; yet, the role of emotion signals in the evolution of cooperation remains unexplored. We present the first model of IR based on evolutionary game theory that exposes how emotion expressions positively influence the evolution of cooperation, particularly in scenarios of frequent errors. Our findings provide evolutionary support for the existence of emotion-based social norms, which help foster cooperation among unrelated individuals.
Jalal-Kamali, Ali; Gurney, Nikolos; Pynadath, David
Predicting Team Performance from Communications in Simulated Search-and-Rescue Miscellaneous
2025, (arXiv:2503.03791 [cs]).
@misc{jalal-kamali_predicting_2025,
title = {Predicting Team Performance from Communications in Simulated Search-and-Rescue},
author = {Ali Jalal-Kamali and Nikolos Gurney and David Pynadath},
url = {http://arxiv.org/abs/2503.03791},
doi = {10.48550/arXiv.2503.03791},
eprint = {2503.03791},
eprinttype = {arXiv},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {Understanding how individual traits influence team performance is valuable, but these traits are not always directly observable. Prior research has inferred traits like trust from behavioral data. We analyze conversational data to identify team traits and their correlation with teaming outcomes. Using transcripts from a Minecraft-based search-and-rescue experiment, we apply topic modeling and clustering to uncover key interaction patterns. Our findings show that variations in teaming outcomes can be explained through these inferences, with different levels of predictive power derived from individual traits and team dynamics.},
note = {arXiv:2503.03791 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
}
Liu, Ruying; Becerik-Gerber, Burcin; Pynadath, David V.; Marti, Deniz; Lucas, Gale M.
Elicitation and verification of learning via experts (EVOLVE) for creating a theoretical framework for active shooter incidents Journal Article
In: Developments in the Built Environment, vol. 21, pp. 100635, 2025, ISSN: 2666-1659.
@article{liu_elicitation_2025,
title = {Elicitation and verification of learning via experts (EVOLVE) for creating a theoretical framework for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and David V. Pynadath and Deniz Marti and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2666165925000353},
doi = {10.1016/j.dibe.2025.100635},
issn = {2666-1659},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
journal = {Developments in the Built Environment},
volume = {21},
pages = {100635},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Ustun, Volkan; Hans, Soham; Kumar, Rajay; Wang, Yunzhe
Abstracting Geo-specific Terrains to Scale Up Reinforcement Learning Miscellaneous
2025, (arXiv:2503.20078 [cs]).
@misc{ustun_abstracting_2025,
title = {Abstracting Geo-specific Terrains to Scale Up Reinforcement Learning},
author = {Volkan Ustun and Soham Hans and Rajay Kumar and Yunzhe Wang},
url = {http://arxiv.org/abs/2503.20078},
doi = {10.48550/arXiv.2503.20078},
eprint = {2503.20078},
eprinttype = {arXiv},
year = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {Multi-agent reinforcement learning (MARL) is increasingly ubiquitous in training dynamic and adaptive synthetic characters for interactive simulations on geo-specific terrains. Frameworks such as Unity's ML-Agents help to make such reinforcement learning experiments more accessible to the simulation community. Military training simulations also benefit from advances in MARL, but they have immense computational requirements due to their complex, continuous, stochastic, partially observable, non-stationary, and doctrine-based nature. Furthermore, these simulations require geo-specific terrains, further exacerbating the computational resources problem. In our research, we leverage Unity's waypoints to automatically generate multi-layered representation abstractions of the geo-specific terrains to scale up reinforcement learning while still allowing the transfer of learned policies between different representations. Our early exploratory results on a novel MARL scenario, where each side has differing objectives, indicate that waypoint-based navigation enables faster and more efficient learning while producing trajectories similar to those taken by expert human players in CSGO gaming environments. This research points out the potential of waypoint-based navigation for reducing the computational costs of developing and training MARL models for military training simulations, where geo-specific terrains and differing objectives are crucial.},
note = {arXiv:2503.20078 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
}
Gurney, Nikolos; Pynadath, David V.; Miller, John H.
Willingness to work as a predictor of human-agent team success Journal Article
In: Front. Comput. Sci., vol. 7, pp. 1405436, 2025, ISSN: 2624-9898.
@article{gurney_willingness_2025,
title = {Willingness to work as a predictor of human-agent team success},
author = {Nikolos Gurney and David V. Pynadath and John H. Miller},
url = {https://www.frontiersin.org/articles/10.3389/fcomp.2025.1405436/full},
doi = {10.3389/fcomp.2025.1405436},
issn = {2624-9898},
year = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
journal = {Frontiers in Computer Science},
volume = {7},
pages = {1405436},
abstract = {Research shows that the effectiveness of human-agent teams depends heavily on human team members' prior experiences, whether from direct teaming activities or relevant domain knowledge. While researchers have proposed various mechanisms to explain this relationship, we present a simpler alternative explanation: experience serves primarily as an indicator of a person's fundamental willingness to engage in teaming tasks. We introduce a measure called “willingness to work” that quantifies this underlying disposition. Our empirical analysis demonstrates that this straightforward metric robustly predicts human-agent team performance. Beyond its practical value as a predictive tool, this reconceptualization of the experience-performance relationship necessitates a fresh examination of existing findings in the field. The results suggest that a team member's basic willingness to invest effort may be more fundamental to success than previously recognized mechanisms.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Siniukov, Maksim; Chang, Di; Tran, Minh; Gong, Hongkun; Chaubey, Ashutosh; Soleymani, Mohammad
DiTaiListener: Controllable High Fidelity Listener Video Generation with Diffusion Miscellaneous
2025, (Version Number: 1).
@misc{siniukov_ditailistener_2025,
title = {DiTaiListener: Controllable High Fidelity Listener Video Generation with Diffusion},
author = {Maksim Siniukov and Di Chang and Minh Tran and Hongkun Gong and Ashutosh Chaubey and Mohammad Soleymani},
url = {https://arxiv.org/abs/2504.04010},
doi = {10.48550/ARXIV.2504.04010},
eprint = {2504.04010},
eprinttype = {arXiv},
year = {2025},
date = {2025-04-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {Generating naturalistic and nuanced listener motions for extended interactions remains an open problem. Existing methods often rely on low-dimensional motion codes for facial behavior generation followed by photorealistic rendering, limiting both visual fidelity and expressive richness. To address these challenges, we introduce DiTaiListener, powered by a video diffusion model with multimodal conditions. Our approach first generates short segments of listener responses conditioned on the speaker's speech and facial motions with DiTaiListener-Gen. It then refines the transitional frames via DiTaiListener-Edit for a seamless transition. Specifically, DiTaiListener-Gen adapts a Diffusion Transformer (DiT) for the task of listener head portrait generation by introducing a Causal Temporal Multimodal Adapter (CTM-Adapter) to process speakers' auditory and visual cues. CTM-Adapter integrates speakers' input in a causal manner into the video generation process to ensure temporally coherent listener responses. For long-form video generation, we introduce DiTaiListener-Edit, a transition refinement video-to-video diffusion model. The model fuses video segments into smooth and continuous videos, ensuring temporal consistency in facial expressions and image quality when merging short video segments produced by DiTaiListener-Gen. Quantitatively, DiTaiListener achieves the state-of-the-art performance on benchmark datasets in both photorealism (+73.8% in FID on RealTalk) and motion representation (+6.1% in FD metric on VICO) spaces. User studies confirm the superior performance of DiTaiListener, with the model being the clear preference in terms of feedback, diversity, and smoothness, outperforming competitors by a significant margin.},
note = {Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.; Busta, Kelly
Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities Journal Article
In: International Journal of Disaster Risk Reduction, vol. 118, pp. 105225, 2025, ISSN: 2212-4209.
@article{liu_impact_2025,
title = {Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2212420925000494},
doi = {10.1016/j.ijdrr.2025.105225},
issn = {2212-4209},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-20},
journal = {International Journal of Disaster Risk Reduction},
volume = {118},
pages = {105225},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Becerik-Gerber, Burçin; Lucas, Gale M.
Investigating Role of Personal Factors in Shaping Responses to Active Shooter Incident using Machine Learning Miscellaneous
2025, (arXiv:2503.05719 [cs]).
@misc{liu_investigating_2025,
title = {Investigating Role of Personal Factors in Shaping Responses to Active Shooter Incident using Machine Learning},
author = {Ruying Liu and Burçin Becerik-Gerber and Gale M. Lucas},
url = {http://arxiv.org/abs/2503.05719},
doi = {10.48550/arXiv.2503.05719},
eprint = {2503.05719},
eprinttype = {arXiv},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {This study bridges the knowledge gap on how personal factors affect building occupants' responses in active shooter situations by applying interpretable machine learning methods to data from 107 participants. The personal factors studied are training methods, prior training experience, sense of direction, and gender. The response performance measurements consist of decisions (run, hide, multiple), vulnerability (corresponding to the time a participant is visible to a shooter), and pre-evacuation time. The results indicate that the propensity to run significantly determines overall response strategies, overshadowing vulnerability, and pre-evacuation time. The training method is a critical factor where VR-based training leads to better responses than video-based training. A better sense of direction and previous training experience are correlated with a greater propensity to run and less vulnerability. Gender slightly influences decisions and vulnerability but significantly impacts pre-evacuation time, with females evacuating slower, potentially due to higher risk perception. This study underscores the importance of personal factors in shaping responses to active shooter incidents.},
note = {arXiv:2503.05719 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Siniukov, Maksim; Xing, Ellie; Attaripour Isfahani, Sanaz; Soleymani, Mohammad
Towards a Generalizable Speech Marker for Parkinson's Disease Diagnosis Miscellaneous
2025, (Version Number: 1).
@misc{siniukov_towards_2025,
title = {Towards a Generalizable Speech Marker for Parkinson's Disease Diagnosis},
author = {Maksim Siniukov and Ellie Xing and Sanaz {Attaripour Isfahani} and Mohammad Soleymani},
url = {https://arxiv.org/abs/2501.03581},
doi = {10.48550/ARXIV.2501.03581},
eprint = {2501.03581},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-14},
publisher = {arXiv},
abstract = {Parkinson's Disease (PD) is a neurodegenerative disorder characterized by motor symptoms, including altered voice production in the early stages. Early diagnosis is crucial not only to improve PD patients' quality of life but also to enhance the efficacy of potential disease-modifying therapies during early neurodegeneration, a window often missed by current diagnostic tools. In this paper, we propose a more generalizable approach to PD recognition through domain adaptation and self-supervised learning. We demonstrate the generalization capabilities of the proposed approach across diverse datasets in different languages. Our approach leverages HuBERT, a large deep neural network originally trained for speech recognition and further trains it on unlabeled speech data from a population that is similar to the target group, i.e., the elderly, in a self-supervised manner. The model is then fine-tuned and adapted for use across different datasets in multiple languages, including English, Italian, and Spanish. Evaluations on four publicly available PD datasets demonstrate the model's efficacy, achieving an average specificity of 92.1% and an average sensitivity of 91.2%. This method offers objective and consistent evaluations across large populations, addressing the variability inherent in human assessments and providing a non-invasive, cost-effective and accessible diagnostic option.},
note = {Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Lucas, Gale M.; Roll, Shawn C.
Impact of selective environmental sound attenuation on operator performance, stress, attention, and task engagement in teleoperated demolition Journal Article
In: Automation in Construction, vol. 169, pp. 105876, 2025, ISSN: 0926-5805.
@article{rodrigues_impact_2025,
title = {Impact of selective environmental sound attenuation on operator performance, stress, attention, and task engagement in teleoperated demolition},
author = {Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0926580524006125},
doi = {10.1016/j.autcon.2024.105876},
issn = {0926-5805},
year = {2025},
date = {2025-01-01},
urldate = {2024-12-20},
journal = {Automation in Construction},
volume = {169},
pages = {105876},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Filter
2025
Klumpe, Stella; Mitchell, Kelsey C.; Cox, Emma; Katz, Jeffrey S.; Lazarowski, Lucia; Deshpande, Gopikrishna; Gratch, Jonathan; Visser, Ewart J. De; Ayaz, Hasan; Li, Xingnan; Franke, Adrian A.; Krueger, Frank
Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions Journal Article
In: PLoS One, vol. 20, no. 6, pp. e0324312, 2025, ISSN: 1932-6203.
Abstract | Links | BibTeX | Tags: DTIC
@article{klumpe_social_2025,
title = {Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions},
author = {Stella Klumpe and Kelsey C. Mitchell and Emma Cox and Jeffrey S. Katz and Lucia Lazarowski and Gopikrishna Deshpande and Jonathan Gratch and Ewart J. {De Visser} and Hasan Ayaz and Xingnan Li and Adrian A. Franke and Frank Krueger},
editor = {Casey R. Lynch},
url = {https://dx.plos.org/10.1371/journal.pone.0324312},
doi = {10.1371/journal.pone.0324312},
issn = {1932-6203},
year = {2025},
date = {2025-06-01},
urldate = {2025-06-12},
journal = {PLoS One},
volume = {20},
number = {6},
pages = {e0324312},
abstract = {In the evolving landscape of technology, robots have emerged as social companions, prompting an investigation into social bonding between humans and robots. While human-animal interactions are well-studied, human-robot interactions (HRI) remain comparatively underexplored. Ethorobotics, a field of social robotic engineering based on ecology and ethology, suggests designing companion robots modeled on animal companions, which are simpler to emulate than humans. However, it is unclear whether these robots can match the social companionship provided by their original models. This study examined social bonding between humans and AIBOs, dog-inspired companion robots, compared to real dogs. Nineteen female participants engaged in 12 affiliative interactions with dogs and AIBOs across two counter-balanced, one-month bonding phases. Social bonding was assessed through urinary oxytocin (OXT) level change over an interaction, self-reported attachment using an adapted version of the Lexington Attachment to Pets Scale, and social companionship evaluations administering the Robot-Dog Questionnaire. To examine OXT level changes and self-reported attachment by comparing the two social companions, we conducted mixed-effects model analyses and planned follow-up comparisons. Frequency comparison, binary logistic regression, and thematic analysis were performed to analyze social companionship evaluations. Results revealed significant differences between dogs and AIBOs in fostering social bonds. OXT level change increased during interactions with dogs but decreased with AIBOs. Participants reported stronger attachment to dogs and rated them as better social companions. These findings highlight the current limitations of AIBOs in fostering social bonding immediately compared to dogs. Our study contributes to the growing HRI research by demonstrating an existing gap between AIBOs and dogs as social companions. 
It highlights the need for further investigation to understand the complexities of social bonding with companion robots, which is essential to implement successful applications for social robots in diverse domains such as the elderly and health care, education, and entertainment.},
internal-note = {NOTE(review): this citation key also appears earlier in the file for the same work; deduplicate to avoid a repeated-entry warning},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices Journal Article
In: Journal of Choice Modelling, vol. 55, pp. 100549, 2025, ISSN: 17555345.
@article{gurney_exploring_2025,
title = {Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1755534525000120},
doi = {10.1016/j.jocm.2025.100549},
issn = {1755-5345},
year = {2025},
date = {2025-06-01},
urldate = {2025-04-15},
journal = {Journal of Choice Modelling},
volume = {55},
pages = {100549},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew
Logical Abduction as a Computational Model of Narrative Proceedings Article
In: Geneva, Switzerland, 2025.
@inproceedings{gordon_andrew_logical_2025,
title = {Logical Abduction as a Computational Model of Narrative},
author = {Andrew Gordon},
url = {https://asgordon.github.io/publications/CMN2025.PDF},
year = {2025},
date = {2025-05-01},
address = {Geneva, Switzerland},
internal-note = {NOTE(review): required booktitle field is missing for this @inproceedings entry; the PDF filename suggests the Workshop on Computational Models of Narrative (CMN) 2025 — confirm and add},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration Journal Article
In: Journal of Environmental Psychology, pp. 102628, 2025, ISSN: 02724944.
@article{awada_impact_2025,
title = {The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494425001112},
doi = {10.1016/j.jenvp.2025.102628},
issn = {0272-4944},
year = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {Journal of Environmental Psychology},
pages = {102628},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Fu, Boxi; Dincer, Betul; Masur, Omkar; Faizi, David; Ravindran, Harshul; Wang, Julia; Lai, Devashish; Merchant, Chirag
Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners Book Section
In: Smith, Brian K.; Borge, Marcela (Ed.): Learning and Collaboration Technologies, vol. 15808, pp. 69–79, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-93745-3 978-3-031-93746-0, (Series Title: Lecture Notes in Computer Science).
@incollection{smith_becoming_2025,
title = {Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners},
author = {Ning Wang and Boxi Fu and Betul Dincer and Omkar Masur and David Faizi and Harshul Ravindran and Julia Wang and Devashish Lai and Chirag Merchant},
editor = {Brian K. Smith and Marcela Borge},
url = {https://link.springer.com/10.1007/978-3-031-93746-0_6},
doi = {10.1007/978-3-031-93746-0_6},
isbn = {978-3-031-93745-3, 978-3-031-93746-0},
year = {2025},
date = {2025-05-01},
urldate = {2025-06-12},
booktitle = {Learning and Collaboration Technologies},
series = {Lecture Notes in Computer Science},
volume = {15808},
pages = {69--79},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC},
pubstate = {published},
tppubtype = {incollection}
}
Core, Mark; Nye, Benjamin; Carr, Kayla; Li, Shirley; Shiel, Aaron; Auerbach, Daniel; Leeds, Andrew; Swartout, William
Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling Journal Article
In: FLAIRS, vol. 38, 2025, ISSN: 2334-0762, 2334-0754.
Abstract | Links | BibTeX | Tags: AI, DTIC
@article{core_usability_2025,
title = {Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling},
author = {Mark Core and Benjamin Nye and Kayla Carr and Shirley Li and Aaron Shiel and Daniel Auerbach and Andrew Leeds and William Swartout},
url = {https://journals.flvc.org/FLAIRS/article/view/138996},
doi = {10.32473/flairs.38.1.138996},
issn = {2334-0762, 2334-0754},
year = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {The International FLAIRS Conference Proceedings},
volume = {38},
abstract = {As AI tools become common across jobs and industries, it is critical to broaden education about AI beyond teaching computer scientists how to build AI systems. To expand AI education, we are researching AI for AI learning: a personalized and adaptive learning system that integrates dialog-based tutoring and gamified programming activities. To study this problem, we adapted and expanded an existing smartphone adaptive coach to develop the Game-if-AI system. Using a design-based research approach, Game-if-AI was iteratively tested and improved across four semesters of optional use in a course designed for technician-level understanding of AI: mastering programming skills to apply AI libraries and established models. In this study, we measured the interests and needs of these technical learners, based on both survey data and on how they engaged with topics in the system. Based on this data, new topics were added and the system was refined. In this paper, we report students' usability ratings for system components and student preferences based on completion rates of AI topics available each semester. Students rated the adaptive system positively overall (93% rated as a "good idea"), but more complex learning activities (tutoring dialogs, programming) were rated lower than traditional ones (e.g., multiple choice, reading). Students were most likely to master topics highly aligned to the course materials, as well as self-directed learning toward easier high-interest topics (e.g., LLM Prompting).},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {article}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Journal Article
In: Int J Artif Intell Educ, 2025, ISSN: 1560-4292, 1560-4306.
Links | BibTeX | Tags: DTIC, Learning Sciences
@article{okado_how_2025,
title = {How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
url = {https://link.springer.com/10.1007/s40593-025-00482-w},
doi = {10.1007/s40593-025-00482-w},
issn = {1560-4292, 1560-4306},
year = {2025},
date = {2025-05-01},
urldate = {2025-06-24},
journal = {International Journal of Artificial Intelligence in Education},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {article}
}
Lin, Spencer; Jun, Miru; Rizk, Basem; Shieh, Karen; Fisher, Scott; Mozgai, Sharon
Optimizing SIA Development: A Case Study in User-Centered Design for Estuary, a Multimodal Socially Interactive Agent Framework Proceedings Article
In: Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–9, 2025, (arXiv:2504.14427 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC
@inproceedings{lin_optimizing_2025,
title = {Optimizing SIA Development: A Case Study in User-Centered Design for Estuary, a Multimodal Socially Interactive Agent Framework},
author = {Spencer Lin and Miru Jun and Basem Rizk and Karen Shieh and Scott Fisher and Sharon Mozgai},
url = {http://arxiv.org/abs/2504.14427},
doi = {10.1145/3706599.3707399},
eprint = {2504.14427},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2025},
date = {2025-04-01},
urldate = {2025-05-20},
booktitle = {Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1--9},
abstract = {This case study presents our user-centered design model for Socially Intelligent Agent (SIA) development frameworks through our experience developing Estuary, an open source multimodal framework for building low-latency real-time socially interactive agents. We leverage the Rapid Assessment Process (RAP) to collect the thoughts of leading researchers in the field of SIAs regarding the current state of the art for SIA development as well as their evaluation of how well Estuary may potentially address current research gaps. We achieve this through a series of end-user interviews conducted by a fellow researcher in the community. We hope that the findings of our work will not only assist the continued development of Estuary but also guide the development of other future frameworks and technologies for SIAs.},
note = {arXiv:2504.14427 [cs]},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Rakshit, Sushrita; Chawla, Kushal; Brett, Jeanne M.; Gratch, Jonathan
KODIS: A Multicultural Dispute Resolution Dialogue Corpus Miscellaneous
2025, (arXiv:2504.12723 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, DTIC
@misc{hale_kodis_2025,
title = {KODIS: A Multicultural Dispute Resolution Dialogue Corpus},
author = {James Hale and Sushrita Rakshit and Kushal Chawla and Jeanne M. Brett and Jonathan Gratch},
url = {http://arxiv.org/abs/2504.12723},
doi = {10.48550/arXiv.2504.12723},
eprint = {2504.12723},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2025},
date = {2025-04-01},
urldate = {2025-05-20},
publisher = {arXiv},
abstract = {We present KODIS, a dyadic dispute resolution corpus containing thousands of dialogues from over 75 countries. Motivated by a theoretical model of culture and conflict, participants engage in a typical customer service dispute designed by experts to evoke strong emotions and conflict. The corpus contains a rich set of dispositional, process, and outcome measures. The initial analysis supports theories of how anger expressions lead to escalatory spirals and highlights cultural differences in emotional expression. We make this corpus and data collection framework available to the community.},
note = {arXiv:2504.12723 [cs]},
keywords = {Dialogue, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Chaubey, Ashutosh; Guan, Xulang; Soleymani, Mohammad
Face-LLaVA: Facial Expression and Attribute Understanding through Instruction Tuning Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, LLM
@misc{chaubey_face-llava_2025,
title = {Face-LLaVA: Facial Expression and Attribute Understanding through Instruction Tuning},
author = {Ashutosh Chaubey and Xulang Guan and Mohammad Soleymani},
url = {https://arxiv.org/abs/2504.07198},
doi = {10.48550/ARXIV.2504.07198},
eprint = {2504.07198},
eprinttype = {arXiv},
year = {2025},
date = {2025-04-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {The human face plays a central role in social communication, necessitating the use of performant computer vision tools for human-centered applications. We propose Face-LLaVA, a multimodal large language model for face-centered, in-context learning, including facial expression and attribute recognition. Additionally, Face-LLaVA is able to generate natural language descriptions that can be used for reasoning. Leveraging existing visual databases, we first developed FaceInstruct-1M, a face-centered database for instruction tuning MLLMs for face processing. We then developed a novel face-specific visual encoder powered by Face-Region Guided Cross-Attention that integrates face geometry with local visual features. We evaluated the proposed method across nine different datasets and five different face processing tasks, including facial expression recognition, action unit detection, facial attribute detection, age estimation and deepfake detection. Face-LLaVA achieves superior results compared to existing open-source MLLMs and competitive performance compared to commercial solutions. Our model output also receives a higher reasoning rating by GPT under a zero-shot setting across all the tasks. Both our dataset and model will be released at https://face-llava.github.io to support future advancements in social AI and foundational vision-language research.},
note = {Version Number: 1},
keywords = {DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
Fonseca, Henrique Correia Da; Melo, Celso M. De; Terada, Kazunori; Gratch, Jonathan; Paiva, Ana S.; Santos, Francisco C.
Evolution of indirect reciprocity under emotion expression Journal Article
In: Sci Rep, vol. 15, no. 1, pp. 9151, 2025, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: DTIC
@article{correia_da_fonseca_evolution_2025,
title = {Evolution of indirect reciprocity under emotion expression},
author = {Henrique {Correia Da Fonseca} and Celso M. {De Melo} and Kazunori Terada and Jonathan Gratch and Ana S. Paiva and Francisco C. Santos},
url = {https://www.nature.com/articles/s41598-025-89588-8},
doi = {10.1038/s41598-025-89588-8},
issn = {2045-2322},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-20},
journal = {Sci Rep},
volume = {15},
number = {1},
pages = {9151},
abstract = {Do emotion expressions impact the evolution of cooperation? Indirect Reciprocity offers a solution to the cooperation dilemma with prior work focusing on the role of social norms in propagating others’ reputations and contributing to evolutionarily stable cooperation. Recent experimental studies, however, show that emotion expressions shape pro-social behaviour, communicate one’s intentions to others, and serve an error-correcting function; yet, the role of emotion signals in the evolution of cooperation remains unexplored. We present the first model of IR based on evolutionary game theory that exposes how emotion expressions positively influence the evolution of cooperation, particularly in scenarios of frequent errors. Our findings provide evolutionary support for the existence of emotion-based social norms, which help foster cooperation among unrelated individuals.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Do emotion expressions impact the evolution of cooperation? Indirect Reciprocity offers a solution to the cooperation dilemma with prior work focusing on the role of social norms in propagating others’ reputations and contributing to evolutionarily stable cooperation. Recent experimental studies, however, show that emotion expressions shape pro-social behaviour, communicate one’s intentions to others, and serve an error-correcting function; yet, the role of emotion signals in the evolution of cooperation remains unexplored. We present the first model of IR based on evolutionary game theory that exposes how emotion expressions positively influence the evolution of cooperation, particularly in scenarios of frequent errors. Our findings provide evolutionary support for the existence of emotion-based social norms, which help foster cooperation among unrelated individuals.
Jalal-Kamali, Ali; Gurney, Nikolos; Pynadath, David
Predicting Team Performance from Communications in Simulated Search-and-Rescue Miscellaneous
2025, (arXiv:2503.03791 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC
@misc{jalal-kamali_predicting_2025,
title = {Predicting Team Performance from Communications in Simulated Search-and-Rescue},
author = {Ali Jalal-Kamali and Nikolos Gurney and David Pynadath},
url = {http://arxiv.org/abs/2503.03791},
doi = {10.48550/arXiv.2503.03791},
eprint = {2503.03791},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {Understanding how individual traits influence team performance is valuable, but these traits are not always directly observable. Prior research has inferred traits like trust from behavioral data. We analyze conversational data to identify team traits and their correlation with teaming outcomes. Using transcripts from a Minecraft-based search-and-rescue experiment, we apply topic modeling and clustering to uncover key interaction patterns. Our findings show that variations in teaming outcomes can be explained through these inferences, with different levels of predictive power derived from individual traits and team dynamics.},
note = {arXiv:2503.03791 [cs]},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Becerik-Gerber, Burcin; Pynadath, David V.; Marti, Deniz; Lucas, Gale M.
Elicitation and verification of learning via experts (EVOLVE) for creating a theoretical framework for active shooter incidents Journal Article
In: Developments in the Built Environment, vol. 21, pp. 100635, 2025, ISSN: 26661659.
Links | BibTeX | Tags: DTIC, Social Simulation
@article{liu_elicitation_2025,
title = {Elicitation and verification of learning via experts (EVOLVE) for creating a theoretical framework for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and David V. Pynadath and Deniz Marti and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2666165925000353},
doi = {10.1016/j.dibe.2025.100635},
issn = {2666-1659},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
journal = {Developments in the Built Environment},
volume = {21},
pages = {100635},
keywords = {DTIC, Social Simulation},
pubstate = {published},
tppubtype = {article}
}
Ustun, Volkan; Hans, Soham; Kumar, Rajay; Wang, Yunzhe
Abstracting Geo-specific Terrains to Scale Up Reinforcement Learning Miscellaneous
2025, (arXiv:2503.20078 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Simulation
@misc{ustun_abstracting_2025,
title = {Abstracting Geo-specific Terrains to Scale Up Reinforcement Learning},
author = {Volkan Ustun and Soham Hans and Rajay Kumar and Yunzhe Wang},
url = {http://arxiv.org/abs/2503.20078},
doi = {10.48550/arXiv.2503.20078},
eprint = {2503.20078},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {Multi-agent reinforcement learning (MARL) is increasingly ubiquitous in training dynamic and adaptive synthetic characters for interactive simulations on geo-specific terrains. Frameworks such as Unity's ML-Agents help to make such reinforcement learning experiments more accessible to the simulation community. Military training simulations also benefit from advances in MARL, but they have immense computational requirements due to their complex, continuous, stochastic, partially observable, non-stationary, and doctrine-based nature. Furthermore, these simulations require geo-specific terrains, further exacerbating the computational resources problem. In our research, we leverage Unity's waypoints to automatically generate multi-layered representation abstractions of the geo-specific terrains to scale up reinforcement learning while still allowing the transfer of learned policies between different representations. Our early exploratory results on a novel MARL scenario, where each side has differing objectives, indicate that waypoint-based navigation enables faster and more efficient learning while producing trajectories similar to those taken by expert human players in CSGO gaming environments. This research points out the potential of waypoint-based navigation for reducing the computational costs of developing and training MARL models for military training simulations, where geo-specific terrains and differing objectives are crucial.},
note = {arXiv:2503.20078 [cs]},
keywords = {DTIC, Simulation},
pubstate = {published},
tppubtype = {misc}
}
Gurney, Nikolos; Pynadath, David V.; Miller, John H.
Willingness to work as a predictor of human-agent team success Journal Article
In: Front. Comput. Sci., vol. 7, pp. 1405436, 2025, ISSN: 2624-9898.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Agents
@article{gurney_willingness_2025,
title = {Willingness to work as a predictor of human-agent team success},
author = {Nikolos Gurney and David V. Pynadath and John H. Miller},
url = {https://www.frontiersin.org/articles/10.3389/fcomp.2025.1405436/full},
doi = {10.3389/fcomp.2025.1405436},
issn = {2624-9898},
year = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
journal = {Frontiers in Computer Science},
volume = {7},
pages = {1405436},
abstract = {Research shows that the effectiveness of human-agent teams depends heavily on human team members' prior experiences, whether from direct teaming activities or relevant domain knowledge. While researchers have proposed various mechanisms to explain this relationship, we present a simpler alternative explanation: experience serves primarily as an indicator of a person's fundamental willingness to engage in teaming tasks. We introduce a measure called “willingness to work” that quantifies this underlying disposition. Our empirical analysis demonstrates that this straightforward metric robustly predicts human-agent team performance. Beyond its practical value as a predictive tool, this reconceptualization of the experience-performance relationship necessitates a fresh examination of existing findings in the field. The results suggest that a team member's basic willingness to invest effort may be more fundamental to success than previously recognized mechanisms.},
keywords = {DTIC, Virtual Agents},
pubstate = {published},
tppubtype = {article}
}
Siniukov, Maksim; Chang, Di; Tran, Minh; Gong, Hongkun; Chaubey, Ashutosh; Soleymani, Mohammad
DiTaiListener: Controllable High Fidelity Listener Video Generation with Diffusion Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, VGL
@misc{siniukov_ditailistener_2025,
title = {DiTaiListener: Controllable High Fidelity Listener Video Generation with Diffusion},
author = {Maksim Siniukov and Di Chang and Minh Tran and Hongkun Gong and Ashutosh Chaubey and Mohammad Soleymani},
url = {https://arxiv.org/abs/2504.04010},
doi = {10.48550/ARXIV.2504.04010},
eprint = {2504.04010},
eprinttype = {arXiv},
year = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {Generating naturalistic and nuanced listener motions for extended interactions remains an open problem. Existing methods often rely on low-dimensional motion codes for facial behavior generation followed by photorealistic rendering, limiting both visual fidelity and expressive richness. To address these challenges, we introduce DiTaiListener, powered by a video diffusion model with multimodal conditions. Our approach first generates short segments of listener responses conditioned on the speaker's speech and facial motions with DiTaiListener-Gen. It then refines the transitional frames via DiTaiListener-Edit for a seamless transition. Specifically, DiTaiListener-Gen adapts a Diffusion Transformer (DiT) for the task of listener head portrait generation by introducing a Causal Temporal Multimodal Adapter (CTM-Adapter) to process speakers' auditory and visual cues. CTM-Adapter integrates speakers' input in a causal manner into the video generation process to ensure temporally coherent listener responses. For long-form video generation, we introduce DiTaiListener-Edit, a transition refinement video-to-video diffusion model. The model fuses video segments into smooth and continuous videos, ensuring temporal consistency in facial expressions and image quality when merging short video segments produced by DiTaiListener-Gen. Quantitatively, DiTaiListener achieves the state-of-the-art performance on benchmark datasets in both photorealism (+73.8% in FID on RealTalk) and motion representation (+6.1% in FD metric on VICO) spaces. User studies confirm the superior performance of DiTaiListener, with the model being the clear preference in terms of feedback, diversity, and smoothness, outperforming competitors by a significant margin.},
note = {Version Number: 1},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.; Busta, Kelly
Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities Journal Article
In: International Journal of Disaster Risk Reduction, vol. 118, pp. 105225, 2025, ISSN: 22124209.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{liu_impact_2025,
title = {Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2212420925000494},
doi = {10.1016/j.ijdrr.2025.105225},
issn = {2212-4209},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-20},
journal = {International Journal of Disaster Risk Reduction},
volume = {118},
pages = {105225},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Becerik-Gerber, Burçin; Lucas, Gale M.
Investigating Role of Personal Factors in Shaping Responses to Active Shooter Incident using Machine Learning Miscellaneous
2025, (arXiv:2503.05719 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, VR
@misc{liu_investigating_2025,
title = {Investigating Role of Personal Factors in Shaping Responses to Active Shooter Incident using Machine Learning},
author = {Ruying Liu and Burçin Becerik-Gerber and Gale M. Lucas},
url = {http://arxiv.org/abs/2503.05719},
doi = {10.48550/arXiv.2503.05719},
eprint = {2503.05719},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2025},
date = {2025-02-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {This study bridges the knowledge gap on how personal factors affect building occupants' responses in active shooter situations by applying interpretable machine learning methods to data from 107 participants. The personal factors studied are training methods, prior training experience, sense of direction, and gender. The response performance measurements consist of decisions (run, hide, multiple), vulnerability (corresponding to the time a participant is visible to a shooter), and pre-evacuation time. The results indicate that the propensity to run significantly determines overall response strategies, overshadowing vulnerability, and pre-evacuation time. The training method is a critical factor where VR-based training leads to better responses than video-based training. A better sense of direction and previous training experience are correlated with a greater propensity to run and less vulnerability. Gender slightly influences decisions and vulnerability but significantly impacts pre-evacuation time, with females evacuating slower, potentially due to higher risk perception. This study underscores the importance of personal factors in shaping responses to active shooter incidents.},
note = {arXiv:2503.05719 [cs]},
keywords = {DTIC, Social Simulation, VR},
pubstate = {published},
tppubtype = {misc}
}
Siniukov, Maksim; Xing, Ellie; Attaripour Isfahani, Sanaz; Soleymani, Mohammad
Towards a Generalizable Speech Marker for Parkinson's Disease Diagnosis Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC
@misc{siniukov_towards_2025,
title = {Towards a Generalizable Speech Marker for Parkinson's Disease Diagnosis},
author = {Maksim Siniukov and Ellie Xing and Sanaz {Attaripour Isfahani} and Mohammad Soleymani},
url = {https://arxiv.org/abs/2501.03581},
doi = {10.48550/ARXIV.2501.03581},
eprint = {2501.03581},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-14},
publisher = {arXiv},
abstract = {Parkinson's Disease (PD) is a neurodegenerative disorder characterized by motor symptoms, including altered voice production in the early stages. Early diagnosis is crucial not only to improve PD patients' quality of life but also to enhance the efficacy of potential disease-modifying therapies during early neurodegeneration, a window often missed by current diagnostic tools. In this paper, we propose a more generalizable approach to PD recognition through domain adaptation and self-supervised learning. We demonstrate the generalization capabilities of the proposed approach across diverse datasets in different languages. Our approach leverages HuBERT, a large deep neural network originally trained for speech recognition and further trains it on unlabeled speech data from a population that is similar to the target group, i.e., the elderly, in a self-supervised manner. The model is then fine-tuned and adapted for use across different datasets in multiple languages, including English, Italian, and Spanish. Evaluations on four publicly available PD datasets demonstrate the model's efficacy, achieving an average specificity of 92.1% and an average sensitivity of 91.2%. This method offers objective and consistent evaluations across large populations, addressing the variability inherent in human assessments and providing a non-invasive, cost-effective and accessible diagnostic option.},
note = {Version Number: 1},
keywords = {DTIC},
pubstate = {published},
tppubtype = {misc}
}
Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Lucas, Gale M.; Roll, Shawn C.
Impact of selective environmental sound attenuation on operator performance, stress, attention, and task engagement in teleoperated demolition Journal Article
In: Automation in Construction, vol. 169, pp. 105876, 2025, ISSN: 09265805.
@article{rodrigues_impact_2025,
title = {Impact of selective environmental sound attenuation on operator performance, stress, attention, and task engagement in teleoperated demolition},
author = {Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0926580524006125},
doi = {10.1016/j.autcon.2024.105876},
issn = {0926-5805},
year = {2025},
date = {2025-01-01},
urldate = {2024-12-20},
journal = {Automation in Construction},
volume = {169},
pages = {105876},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Liu, Rong; Sun, Dylan; Chen, Meida; Wang, Yue; Feng, Andrew
Deformable Beta Splatting Miscellaneous
2025, (arXiv:2501.18630 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Narrative
@misc{liu_deformable_2025,
title = {Deformable Beta Splatting},
author = {Rong Liu and Dylan Sun and Meida Chen and Yue Wang and Andrew Feng},
url = {http://arxiv.org/abs/2501.18630},
doi = {10.48550/arXiv.2501.18630},
eprint = {2501.18630},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2025},
date = {2025-01-01},
urldate = {2025-02-20},
publisher = {arXiv},
abstract = {3D Gaussian Splatting (3DGS) has advanced radiance field reconstruction by enabling real-time rendering. However, its reliance on Gaussian kernels for geometry and low-order Spherical Harmonics (SH) for color encoding limits its ability to capture complex geometries and diverse colors. We introduce Deformable Beta Splatting (DBS), a deformable and compact approach that enhances both geometry and color representation. DBS replaces Gaussian kernels with deformable Beta Kernels, which offer bounded support and adaptive frequency control to capture fine geometric details with higher fidelity while achieving better memory efficiency. In addition, we extended the Beta Kernel to color encoding, which facilitates improved representation of diffuse and specular components, yielding superior results compared to SH-based methods. Furthermore, Unlike prior densification techniques that depend on Gaussian properties, we mathematically prove that adjusting regularized opacity alone ensures distribution-preserved Markov chain Monte Carlo (MCMC), independent of the splatting kernel type. Experimental results demonstrate that DBS achieves state-of-the-art visual quality while utilizing only 45% of the parameters and rendering 1.5x faster than 3DGS-based methods. Notably, for the first time, splatting-based methods outperform state-of-the-art Neural Radiance Fields, highlighting the superior performance and efficiency of DBS for real-time radiance field rendering.},
note = {arXiv:2501.18630 [cs]},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {misc}
}
Wang, Ning; Hurt, Timothy; Krakowski, Ari; Greenwald, Eric; Hammerman, Jim; Santos, Sabrina De Los; Masur, Omkar; Fu, Boxi; Merchant, Chirag
Virtually Human: An Exhibit for Public AI Education Book Section
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2025 Posters, vol. 2529, pp. 436–443, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-94170-2 978-3-031-94171-9, (Series Title: Communications in Computer and Information Science).
@incollection{stephanidis_virtually_2025,
title = {Virtually Human: An Exhibit for Public {AI} Education},
author = {Ning Wang and Timothy Hurt and Ari Krakowski and Eric Greenwald and Jim Hammerman and Sabrina De Los Santos and Omkar Masur and Boxi Fu and Chirag Merchant},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/10.1007/978-3-031-94171-9_42},
doi = {10.1007/978-3-031-94171-9_42},
isbn = {978-3-031-94170-2 978-3-031-94171-9},
year = {2025},
date = {2025-01-01},
urldate = {2025-06-17},
booktitle = {HCI International 2025 Posters},
volume = {2529},
pages = {436–443},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {DTIC},
pubstate = {published},
tppubtype = {incollection}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
SetPeER: Set-Based Personalized Emotion Recognition With Weak Supervision Journal Article
In: IEEE Trans. Affective Comput., pp. 1–15, 2025, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: DTIC, Emotion
@article{tran_setpeer_2025,
title = {{SetPeER}: Set-Based Personalized Emotion Recognition With Weak Supervision},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10993348/},
doi = {10.1109/TAFFC.2025.3568024},
issn = {1949-3045, 2371-9850},
year = {2025},
date = {2025-01-01},
urldate = {2025-05-20},
journal = {IEEE Trans. Affective Comput.},
pages = {1–15},
keywords = {DTIC, Emotion},
pubstate = {published},
tppubtype = {article}
}
2024
Xu, Jiuyi; Chen, Meida; Feng, Andrew; Yu, Zifan; Shi, Yangming
Open-Vocabulary High-Resolution 3D (OVHR3D) Data Segmentation and Annotation Framework Journal Article
In: 2024, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: DTIC, Narrative
@article{xu_open-vocabulary_2024,
title = {Open-Vocabulary High-Resolution {3D} ({OVHR3D}) Data Segmentation and Annotation Framework},
author = {Jiuyi Xu and Meida Chen and Andrew Feng and Zifan Yu and Yangming Shi},
url = {https://arxiv.org/abs/2412.06268},
doi = {10.48550/arXiv.2412.06268},
eprint = {2412.06268},
eprinttype = {arXiv},
year = {2024},
date = {2024-12-01},
urldate = {2024-12-20},
abstract = {In the domain of the U.S. Army modeling and simulation, the availability of high quality annotated 3D data is pivotal to creating virtual environments for training and simulations. Traditional methodologies for 3D semantic and instance segmentation, such as KpConv, RandLA, Mask3D, etc., are designed to train on extensive labeled datasets to obtain satisfactory performance in practical tasks. This requirement presents a significant challenge, given the inherent scarcity of manually annotated 3D datasets, particularly for the military use cases. Recognizing this gap, our previous research leverages the One World Terrain data repository manually annotated databases, as showcased at IITSEC 2019 and 2021, to enrich the training dataset for deep learning models. However, collecting and annotating large scale 3D data for specific tasks remains costly and inefficient. To this end, the objective of this research is to design and develop a comprehensive and efficient framework for 3D segmentation tasks to assist in 3D data annotation. This framework integrates Grounding DINO and Segment anything Model, augmented by an enhancement in 2D image rendering via 3D mesh. Furthermore, the authors have also developed a user friendly interface that facilitates the 3D annotation process, offering intuitive visualization of rendered images and the 3D point cloud.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
DIM: Dyadic Interaction Modeling for Social Behavior Generation Book Section
In: Leonardis, Aleš; Ricci, Elisa; Roth, Stefan; Russakovsky, Olga; Sattler, Torsten; Varol, Gül (Ed.): Computer Vision – ECCV 2024, vol. 15095, pp. 484–503, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-72912-6 978-3-031-72913-3, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Social
@incollection{leonardis_dim_2024,
title = {{DIM}: Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
editor = {Aleš Leonardis and Elisa Ricci and Stefan Roth and Olga Russakovsky and Torsten Sattler and Gül Varol},
url = {https://link.springer.com/10.1007/978-3-031-72913-3_27},
doi = {10.1007/978-3-031-72913-3_27},
isbn = {978-3-031-72912-6 978-3-031-72913-3},
year = {2024},
date = {2024-12-01},
urldate = {2025-01-16},
booktitle = {Computer Vision – ECCV 2024},
volume = {15095},
pages = {484–503},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, Social},
pubstate = {published},
tppubtype = {incollection}
}
Marti, Deniz; Budathoki, Anjila; Ding, Yi; Lucas, Gale; Nelson, David
How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations? Journal Article
In: International Journal of Human–Computer Interaction, pp. 1–12, 2024, ISSN: 1044-7318, 1532-7590.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{marti_how_2024,
title = {How Does Acknowledging Users’ Preferences Impact {AI}’s Ability to Make Conflicting Recommendations?},
author = {Deniz Marti and Anjila Budathoki and Yi Ding and Gale Lucas and David Nelson},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2024.2426035},
doi = {10.1080/10447318.2024.2426035},
issn = {1044-7318, 1532-7590},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {International Journal of Human–Computer Interaction},
pages = {1–12},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Bonial, Claire; Lukin, Stephanie M.; Abrams, Mitchell; Baker, Anthony; Donatelli, Lucia; Foots, Ashley; Hayes, Cory J.; Henry, Cassidy; Hudson, Taylor; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human–robot dialogue annotation for multi-modal common ground Journal Article
In: Lang Resources & Evaluation, 2024, ISSN: 1574-020X, 1574-0218.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{bonial_humanrobot_2024,
title = {Human–robot dialogue annotation for multi-modal common ground},
author = {Claire Bonial and Stephanie M. Lukin and Mitchell Abrams and Anthony Baker and Lucia Donatelli and Ashley Foots and Cory J. Hayes and Cassidy Henry and Taylor Hudson and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {https://link.springer.com/10.1007/s10579-024-09784-2},
doi = {10.1007/s10579-024-09784-2},
issn = {1574-020X, 1574-0218},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {Lang Resources \& Evaluation},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Han, Kangle; Yu, Zifan; Feng, Andrew; Hou, Yu; You, Suya; Soibelman, Lucio
An Aerial Photogrammetry Benchmark Dataset for Point Cloud Segmentation and Style Translation Journal Article
In: Remote Sensing, vol. 16, no. 22, pp. 4240, 2024, ISSN: 2072-4292.
Abstract | Links | BibTeX | Tags: DTIC, VGL
@article{chen_aerial_2024,
  title     = {An Aerial Photogrammetry Benchmark Dataset for Point Cloud Segmentation and Style Translation},
  author    = {Meida Chen and Kangle Han and Zifan Yu and Andrew Feng and Yu Hou and Suya You and Lucio Soibelman},
  url       = {https://www.mdpi.com/2072-4292/16/22/4240},
  doi       = {10.3390/rs16224240},
  issn      = {2072-4292},
  year      = {2024},
  date      = {2024-11-01},
  urldate   = {2024-12-05},
  journal   = {Remote Sensing},
  volume    = {16},
  number    = {22},
  pages     = {4240},
  abstract  = {The recent surge in diverse 3D datasets spanning various scales and applications marks a significant advancement in the field. However, the comprehensive process of data acquisition, refinement, and annotation at a large scale poses a formidable challenge, particularly for individual researchers and small teams. To this end, we present a novel synthetic 3D point cloud generation framework that can produce detailed outdoor aerial photogrammetric 3D datasets with accurate ground truth annotations without the labor-intensive and time-consuming data collection/annotation processes. Our pipeline procedurally generates synthetic environments, mirroring real-world data collection and 3D reconstruction processes. A key feature of our framework is its ability to replicate consistent quality, noise patterns, and diversity similar to real-world datasets. This is achieved by adopting UAV flight patterns that resemble those used in real-world data collection processes (e.g., the cross-hatch flight pattern) across various synthetic terrains that are procedurally generated, thereby ensuring data consistency akin to real-world scenarios. Moreover, the generated datasets are enriched with precise semantic and instance annotations, eliminating the need for manual labeling. Our approach has led to the development and release of the Semantic Terrain Points Labeling—Synthetic 3D (STPLS3D) benchmark, an extensive outdoor 3D dataset encompassing over 16 km2, featuring up to 19 semantic labels. We also collected, reconstructed, and annotated four real-world datasets for validation purposes. Extensive experiments on these datasets demonstrate our synthetic datasets’ effectiveness, superior quality, and their value as a benchmark dataset for further point cloud research.},
  keywords  = {DTIC, VGL},
  pubstate  = {published},
  tppubtype = {article}
}
Loucks, Laura; Rizzo, Albert; Rothbaum, Barbara O.
Virtual Reality Exposure for Treating PTSD Due to Military Sexual Trauma Journal Article
In: J Clin Psychol, pp. jclp.23750, 2024, ISSN: 0021-9762, 1097-4679.
Abstract | Links | BibTeX | Tags: DTIC, MedVR
@article{loucks_virtual_2024,
title = {Virtual Reality Exposure for Treating {PTSD} Due to Military Sexual Trauma},
author = {Laura Loucks and Albert Rizzo and Barbara O. Rothbaum},
url = {https://onlinelibrary.wiley.com/doi/10.1002/jclp.23750},
doi = {10.1002/jclp.23750},
issn = {0021-9762, 1097-4679},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {J Clin Psychol},
pages = {jclp.23750},
abstract = {Virtual reality exposure therapy (VRE) has been used in the treatment of combat-related PTSD since the late 1990s and was recently adapted to treat PTSD due to military sexual trauma (MST). With content specifically tailored to MST-related contexts, we present the case study of a military veteran who participated in the open clinical trial examining the feasibility of VRE in the treatment of MST-related PTSD (Loucks et al. 2019). We illustrate VRE's use in activating the trauma memory to facilitate therapeutic emotional processing across sessions and overall symptom reduction. The case study includes common challenges that may occur during VRE and relevant recommendations. The discussion will include lessons learned from the case study and the open clinical trial, recommendations for the flexible application of VRE, and the ongoing developments in the latest version of the VRE system, informed by feedback acquired from the clinicians and patients who experienced it in the initial clinical trial.},
keywords = {DTIC, MedVR},
pubstate = {published},
tppubtype = {article}
}
Virtual reality exposure therapy (VRE) has been used in the treatment of combat‐related PTSD since the late 1990s and was recently adapted to treat PTSD due to military sexual trauma (MST). With content specifically tailored to MST‐related contexts, we present the case study of a military veteran who participated in the open clinical trial examining the feasibility of VRE in the treatment of MST‐related PTSD (Loucks et al. 2019). We illustrate VRE's use in activating the trauma memory to facilitate therapeutic emotional processing across sessions and overall symptom reduction. The case study includes common challenges that may occur during VRE and relevant recommendations. The discussion will include lessons learned from the case study and the open clinical trial, recommendations for the flexible application of VRE, and the ongoing developments in the latest version of the VRE system, informed by feedback acquired from the clinicians and patients who experienced it in the initial clinical trial.
Roemmele, Melissa; Gordon, Andrew
From Test-Taking to Test-Making: Examining LLM Authoring of Commonsense Assessment Items Proceedings Article
In: Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 5193–5203, Association for Computational Linguistics, Miami, Florida, USA, 2024.
Links | BibTeX | Tags: DTIC, Learning Sciences
@inproceedings{roemmele_test-taking_2024,
title = {From Test-Taking to Test-Making: Examining {LLM} Authoring of Commonsense Assessment Items},
author = {Melissa Roemmele and Andrew Gordon},
url = {https://aclanthology.org/2024.findings-emnlp.299},
doi = {10.18653/v1/2024.findings-emnlp.299},
year = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024},
pages = {5193–5203},
publisher = {Association for Computational Linguistics},
address = {Miami, Florida, USA},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew S.
From Test-Taking to Test-Making: Examining LLM Authoring of Commonsense Assessment Items Miscellaneous
2024, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences
@misc{roemmele_test-taking_2024-1,
title = {From Test-Taking to Test-Making: Examining {LLM} Authoring of Commonsense Assessment Items},
author = {Melissa Roemmele and Andrew S. Gordon},
url = {https://arxiv.org/abs/2410.14897},
doi = {10.48550/arXiv.2410.14897},
eprint = {2410.14897},
eprinttype = {arXiv},
year = {2024},
date = {2024-10-01},
urldate = {2024-12-05},
publisher = {arXiv},
abstract = {LLMs can now perform a variety of complex writing tasks. They also excel in answering questions pertaining to natural language inference and commonsense reasoning. Composing these questions is itself a skilled writing task, so in this paper we consider LLMs as authors of commonsense assessment items. We prompt LLMs to generate items in the style of a prominent benchmark for commonsense reasoning, the Choice of Plausible Alternatives (COPA). We examine the outcome according to analyses facilitated by the LLMs and human annotation. We find that LLMs that succeed in answering the original COPA benchmark are also more successful in authoring their own items.},
note = {Version Number: 1},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {misc}
}
Lucas, Gale M.; Becerik-Gerber, Burcin; Roll, Shawn C.
Calibrating workers’ trust in intelligent automated systems Journal Article
In: Patterns, vol. 5, no. 9, pp. 101045, 2024, ISSN: 2666-3899, (Publisher: Elsevier BV).
@article{lucas_calibrating_2024,
  title     = {Calibrating workers’ trust in intelligent automated systems},
  author    = {Gale M. Lucas and Burcin Becerik-Gerber and Shawn C. Roll},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2666389924001879},
  doi       = {10.1016/j.patter.2024.101045},
  issn      = {2666-3899},
  year      = {2024},
  date      = {2024-09-01},
  urldate   = {2024-09-17},
  journal   = {Patterns},
  volume    = {5},
  number    = {9},
  pages     = {101045},
  note      = {Publisher: Elsevier BV},
  keywords  = {DTIC},
  pubstate  = {published},
  tppubtype = {article}
}
Georgila, Kallirroi
Comparing Pre-Trained Embeddings and Domain-Independent Features for Regression-Based Evaluation of Task-Oriented Dialogue Systems Proceedings Article
In: Proceedings of the 25th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 610–623, Association for Computational Linguistics, Kyoto, Japan, 2024.
Links | BibTeX | Tags: Dialogue, DTIC, Natural Language
@inproceedings{georgila_comparing_2024,
  title     = {Comparing Pre-Trained Embeddings and Domain-Independent Features for Regression-Based Evaluation of Task-Oriented Dialogue Systems},
  author    = {Kallirroi Georgila},
  url       = {https://aclanthology.org/2024.sigdial-1.52},
  doi       = {10.18653/v1/2024.sigdial-1.52},
  year      = {2024},
  date      = {2024-09-01},
  urldate   = {2024-10-15},
  booktitle = {Proceedings of the 25th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  pages     = {610–623},
  publisher = {Association for Computational Linguistics},
  address   = {Kyoto, Japan},
  keywords  = {Dialogue, DTIC, Natural Language},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gao, Zhiyuan; Teng, Wenbin; Chen, Gonglin; Wu, Jinsen; Xu, Ningli; Qin, Rongjun; Feng, Andrew; Zhao, Yajie
Skyeyes: Ground Roaming using Aerial View Images Miscellaneous
2024, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC
@misc{gao_skyeyes_2024,
title = {Skyeyes: Ground Roaming using Aerial View Images},
author = {Zhiyuan Gao and Wenbin Teng and Gonglin Chen and Jinsen Wu and Ningli Xu and Rongjun Qin and Andrew Feng and Yajie Zhao},
url = {https://arxiv.org/abs/2409.16685},
doi = {10.48550/arXiv.2409.16685},
eprint = {2409.16685},
eprinttype = {arXiv},
year = {2024},
date = {2024-09-01},
urldate = {2025-01-16},
publisher = {arXiv},
abstract = {Integrating aerial imagery-based scene generation into applications like autonomous driving and gaming enhances realism in 3D environments, but challenges remain in creating detailed content for occluded areas and ensuring real-time, consistent rendering. In this paper, we introduce Skyeyes, a novel framework that can generate photorealistic sequences of ground view images using only aerial view inputs, thereby creating a ground roaming experience. More specifically, we combine a 3D representation with a view consistent generation model, which ensures coherence between generated images. This method allows for the creation of geometrically consistent ground view images, even with large view gaps. The images maintain improved spatial-temporal coherence and realism, enhancing scene comprehension and visualization from aerial perspectives. To the best of our knowledge, there are no publicly available datasets that contain pairwise geo-aligned aerial and ground view imagery. Therefore, we build a large, synthetic, and geo-aligned dataset using Unreal Engine. Both qualitative and quantitative analyses on this synthetic dataset display superior results compared to other leading synthesis approaches. See the project page for more results: https://chaoren2357.github.io/website-skyeyes/.},
note = {Version Number: 1},
keywords = {DTIC},
pubstate = {published},
tppubtype = {misc}
}
Hale, James; Schweitzer, Lindsey; Gratch, Jonathan
Pitfalls of Embodiment in Human-Agent Experiment Design Proceedings Article
In: Proceedings of the ACM International Conference on Intelligent Virtual Agents, pp. 1–9, ACM, GLASGOW United Kingdom, 2024, ISBN: 979-8-4007-0625-7.
@inproceedings{hale_pitfalls_2024,
title = {Pitfalls of Embodiment in Human-Agent Experiment Design},
author = {James Hale and Lindsey Schweitzer and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3652988.3673958},
doi = {10.1145/3652988.3673958},
isbn = {979-8-4007-0625-7},
year = {2024},
date = {2024-09-01},
urldate = {2025-01-16},
booktitle = {Proceedings of the ACM International Conference on Intelligent Virtual Agents},
pages = {1–9},
publisher = {ACM},
address = {Glasgow, United Kingdom},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Gonglin; Wu, Jinsen; Chen, Haiwei; Teng, Wenbin; Gao, Zhiyuan; Feng, Andrew; Qin, Rongjun; Zhao, Yajie
Geometry-aware Feature Matching for Large-Scale Structure from Motion Miscellaneous
2024, (Version Number: 3).
Abstract | Links | BibTeX | Tags: DTIC
@misc{chen_geometry-aware_2024,
title = {Geometry-aware Feature Matching for Large-Scale Structure from Motion},
author = {Gonglin Chen and Jinsen Wu and Haiwei Chen and Wenbin Teng and Zhiyuan Gao and Andrew Feng and Rongjun Qin and Yajie Zhao},
url = {https://arxiv.org/abs/2409.02310},
doi = {10.48550/arXiv.2409.02310},
eprint = {2409.02310},
eprinttype = {arXiv},
year = {2024},
date = {2024-09-01},
urldate = {2025-01-16},
publisher = {arXiv},
abstract = {Establishing consistent and dense correspondences across multiple images is crucial for Structure from Motion (SfM) systems. Significant view changes, such as air-to-ground with very sparse view overlap, pose an even greater challenge to the correspondence solvers. We present a novel optimization-based approach that significantly enhances existing feature matching methods by introducing geometry cues in addition to color cues. This helps fill gaps when there is less overlap in large-scale scenarios. Our method formulates geometric verification as an optimization problem, guiding feature matching within detector-free methods and using sparse correspondences from detector-based methods as anchor points. By enforcing geometric constraints via the Sampson Distance, our approach ensures that the denser correspondences from detector-free methods are geometrically consistent and more accurate. This hybrid strategy significantly improves correspondence density and accuracy, mitigates multi-view inconsistencies, and leads to notable advancements in camera pose accuracy and point cloud density. It outperforms state-of-the-art feature matching methods on benchmark datasets and enables feature matching in challenging extreme large-scale settings.},
note = {Version Number: 3},
keywords = {DTIC},
pubstate = {published},
tppubtype = {misc}
}
Tak, Ala N.; Gratch, Jonathan
GPT-4 Emulates Average-Human Emotional Cognition from a Third-Person Perspective Miscellaneous
2024, (arXiv:2408.13718 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Emotions
@misc{tak_gpt-4_2024,
title = {{GPT-4} Emulates Average-Human Emotional Cognition from a Third-Person Perspective},
author = {Ala N. Tak and Jonathan Gratch},
url = {http://arxiv.org/abs/2408.13718},
eprint = {2408.13718},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-08-01},
urldate = {2024-09-17},
publisher = {arXiv},
abstract = {This paper extends recent investigations on the emotional reasoning abilities of Large Language Models (LLMs). Current research on LLMs has not directly evaluated the distinction between how LLMs predict the self-attribution of emotions and the perception of others' emotions. We first look at carefully crafted emotion-evoking stimuli, originally designed to find patterns of brain neural activity representing fine-grained inferred emotional attributions of others. We show that GPT-4 is especially accurate in reasoning about such stimuli. This suggests LLMs agree with humans' attributions of others' emotions in stereotypical scenarios remarkably more than self-attributions of emotions in idiosyncratic situations. To further explore this, our second study utilizes a dataset containing annotations from both the author and a third-person perspective. We find that GPT-4's interpretations align more closely with human judgments about the emotions of others than with self-assessments. Notably, conventional computational models of emotion primarily rely on self-reported ground truth as the gold standard. However, an average observer's standpoint, which LLMs appear to have adopted, might be more relevant for many downstream applications, at least in the absence of individual information and adequate safety considerations.},
note = {arXiv:2408.13718 [cs]},
keywords = {DTIC, Emotions},
pubstate = {published},
tppubtype = {misc}
}
Han, Bin; Yau, Cleo; Lei, Su; Gratch, Jonathan
Knowledge-based Emotion Recognition using Large Language Models Miscellaneous
2024, (arXiv:2408.04123 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Emotions
@misc{han_knowledge-based_2024,
title = {Knowledge-based Emotion Recognition using Large Language Models},
author = {Bin Han and Cleo Yau and Su Lei and Jonathan Gratch},
url = {http://arxiv.org/abs/2408.04123},
eprint = {2408.04123},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-08-01},
urldate = {2024-08-15},
publisher = {arXiv},
abstract = {Emotion recognition in social situations is a complex task that requires integrating information from both facial expressions and the situational context. While traditional approaches to automatic emotion recognition have focused on decontextualized signals, recent research emphasizes the importance of context in shaping emotion perceptions. This paper contributes to the emerging field of context-based emotion recognition by leveraging psychological theories of human emotion perception to inform the design of automated methods. We propose an approach that combines emotion recognition methods with Bayesian Cue Integration (BCI) to integrate emotion inferences from decontextualized facial expressions and contextual knowledge inferred via Large-language Models. We test this approach in the context of interpreting facial expressions during a social task, the prisoner's dilemma. Our results provide clear support for BCI across a range of automatic emotion recognition methods. The best automated method achieved results comparable to human observers, suggesting the potential for this approach to advance the field of affective computing.},
note = {arXiv:2408.04123 [cs]},
keywords = {DTIC, Emotions},
pubstate = {published},
tppubtype = {misc}
}
Fischer, Katrin; Velentza, Anna-Maria; Lucas, Gale; Williams, Dmitri
Seeing Eye to Eye with Robots: An Experimental Study Predicting Trust in Social Robots for Domestic Use Proceedings Article
In: 2024 33rd IEEE International Conference on Robot and Human Interactive Communication (ROMAN), pp. 2162–2168, IEEE, Pasadena, CA, USA, 2024, ISBN: 979-8-3503-7502-2.
Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{fischer_seeing_2024,
title = {Seeing Eye to Eye with Robots: An Experimental Study Predicting Trust in Social Robots for Domestic Use},
author = {Katrin Fischer and Anna-Maria Velentza and Gale Lucas and Dmitri Williams},
url = {https://ieeexplore.ieee.org/document/10731371/},
doi = {10.1109/RO-MAN60168.2024.10731371},
isbn = {979-8-3503-7502-2},
year = {2024},
date = {2024-08-01},
urldate = {2024-12-05},
booktitle = {2024 33rd IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {2162–2168},
publisher = {IEEE},
address = {Pasadena, CA, USA},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ruying; Wu, Wanjing; Becerik-Gerber, Burcin; Lucas, Gale M.
Enhancing Building Safety Design for Active Shooter Incidents: Exploration of Building Exit Parameters using Reinforcement Learning-Based Simulations Miscellaneous
2024, (arXiv:2407.10441 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Virtual Worlds
@misc{liu_enhancing_2024,
title = {Enhancing Building Safety Design for Active Shooter Incidents: Exploration of Building Exit Parameters using Reinforcement Learning-Based Simulations},
author = {Ruying Liu and Wanjing Wu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {http://arxiv.org/abs/2407.10441},
eprint = {2407.10441},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-07-01},
urldate = {2024-09-17},
publisher = {arXiv},
abstract = {With the alarming rise in active shooter incidents (ASIs) in the United States, enhancing public safety through building design has become a pressing need. This study proposes a reinforcement learning-based simulation approach addressing gaps in existing research that has neglected the dynamic behaviours of shooters. We developed an autonomous agent to simulate an active shooter within a realistic office environment, aiming to offer insights into the interactions between building design parameters and ASI outcomes. A case study is conducted to quantitatively investigate the impact of building exit numbers (total count of accessible exits) and configuration (arrangement of which exits are available or not) on evacuation and harm rates. Findings demonstrate that greater exit availability significantly improves evacuation outcomes and reduces harm. Exits nearer to the shooter's initial position hold greater importance for accessibility than those farther away. By encompassing dynamic shooter behaviours, this study offers preliminary insights into effective building safety design against evolving threats.},
note = {arXiv:2407.10441 [cs]},
keywords = {DTIC, Virtual Worlds},
pubstate = {published},
tppubtype = {misc}
}
Xiao, Hanyuan; Chen, Yingshu; Huang, Huajian; Xiong, Haolin; Yang, Jing; Prasad, Pratusha; Zhao, Yajie
Localized Gaussian Splatting Editing with Contextual Awareness Miscellaneous
2024, (arXiv:2408.00083 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, VGL
@misc{xiao_localized_2024,
title = {Localized {Gaussian} Splatting Editing with Contextual Awareness},
author = {Hanyuan Xiao and Yingshu Chen and Huajian Huang and Haolin Xiong and Jing Yang and Pratusha Prasad and Yajie Zhao},
url = {http://arxiv.org/abs/2408.00083},
eprint = {2408.00083},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-16},
publisher = {arXiv},
abstract = {Recent text-guided generation of individual 3D object has achieved great success using diffusion priors. However, these methods are not suitable for object insertion and replacement tasks as they do not consider the background, leading to illumination mismatches within the environment. To bridge the gap, we introduce an illumination-aware 3D scene editing pipeline for 3D Gaussian Splatting (3DGS) representation. Our key observation is that inpainting by the state-of-the-art conditional 2D diffusion model is consistent with background in lighting. To leverage the prior knowledge from the well-trained diffusion models for 3D object generation, our approach employs a coarse-to-fine objection optimization pipeline with inpainted views. In the first coarse step, we achieve image-to-3D lifting given an ideal inpainted view. The process employs 3D-aware diffusion prior from a view-conditioned diffusion model, which preserves illumination present in the conditioning image. To acquire an ideal inpainted image, we introduce an Anchor View Proposal (AVP) algorithm to find a single view that best represents the scene illumination in target region. In the second Texture Enhancement step, we introduce a novel Depth-guided Inpainting Score Distillation Sampling (DI-SDS), which enhances geometry and texture details with the inpainting diffusion prior, beyond the scope of the 3D-aware diffusion prior knowledge in the first coarse step. DI-SDS not only provides fine-grained texture enhancement, but also urges optimization to respect scene lighting. Our approach efficiently achieves local editing with global illumination consistency without explicitly modeling light transport. We demonstrate robustness of our method by evaluating editing in real scenes containing explicit highlight and shadows, and compare against the state-of-the-art text-to-3D editing methods.},
note = {arXiv:2408.00083 [cs]},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {misc}
}
Huang, Shuo; Jones, Fred; Gurney, Nikolos; Pynadath, David; Srivastava, Kunal; Trent, Stoney; Wu, Peggy; Zhu, Quanyan
PsybORG+: Modeling and Simulation for Detecting Cognitive Biases in Advanced Persistent Threats Miscellaneous
2024, (Version Number: 3).
Abstract | Links | BibTeX | Tags: DTIC
@misc{huang_psyborg_2024,
title = {{PsybORG+}: Modeling and Simulation for Detecting Cognitive Biases in Advanced Persistent Threats},
author = {Shuo Huang and Fred Jones and Nikolos Gurney and David Pynadath and Kunal Srivastava and Stoney Trent and Peggy Wu and Quanyan Zhu},
url = {https://arxiv.org/abs/2408.01310},
doi = {10.48550/ARXIV.2408.01310},
eprint = {2408.01310},
eprinttype = {arXiv},
year = {2024},
date = {2024-07-01},
urldate = {2024-12-05},
publisher = {arXiv},
abstract = {Advanced Persistent Threats (APTs) bring significant challenges to cybersecurity due to their sophisticated and stealthy nature. Traditional cybersecurity measures fail to defend against APTs. Cognitive vulnerabilities can significantly influence attackers' decision-making processes, which presents an opportunity for defenders to exploit. This work introduces PsybORG$^{+}$, a multi-agent cybersecurity simulation environment designed to model APT behaviors influenced by cognitive vulnerabilities. A classification model is built for cognitive vulnerability inference and a simulator is designed for synthetic data generation. Results show that PsybORG$^{+}$ can effectively model APT attackers with different loss aversion and confirmation bias levels. The classification model has at least a 0.83 accuracy rate in predicting cognitive vulnerabilities.},
note = {Version Number: 3},
keywords = {DTIC},
pubstate = {published},
tppubtype = {misc}
}
Core, Mark G.; Nye, Benjamin D.; Fegley, Brent D.
Trend-Aware Scenario Authoring: Adapting Training Toward Patterns from Real Operations Book Section
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, vol. 14727, pp. 15–24, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-60608-3 978-3-031-60609-0, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC
@incollection{sottilare_trend-aware_2024,
title = {Trend-Aware Scenario Authoring: Adapting Training Toward Patterns from Real Operations},
author = {Mark G. Core and Benjamin D. Nye and Brent D. Fegley},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/10.1007/978-3-031-60609-0_2},
doi = {10.1007/978-3-031-60609-0_2},
isbn = {978-3-031-60608-3 978-3-031-60609-0},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-18},
booktitle = {Adaptive Instructional Systems},
series = {Lecture Notes in Computer Science},
volume = {14727},
pages = {15--24},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Core, Mark G.; Chereddy, Sai V. R.; Young, Vivian; Auerbach, Daniel
Bootstrapping Assessments for Team Simulations: Transfer Learning Between First-Person-Shooter Game Maps Book Section
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, vol. 14727, pp. 261–271, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-60608-3 978-3-031-60609-0, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Learning Sciences, Machine Learning, UARC
@incollection{sottilare_bootstrapping_2024,
title = {Bootstrapping Assessments for Team Simulations: Transfer Learning Between First-Person-Shooter Game Maps},
author = {Benjamin D. Nye and Mark G. Core and Sai V. R. Chereddy and Vivian Young and Daniel Auerbach},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/10.1007/978-3-031-60609-0_19},
doi = {10.1007/978-3-031-60609-0_19},
isbn = {978-3-031-60608-3 978-3-031-60609-0},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-18},
booktitle = {Adaptive Instructional Systems},
series = {Lecture Notes in Computer Science},
volume = {14727},
pages = {261--271},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC, Learning Sciences, Machine Learning, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Chen, Meida; Lal, Devashish; Yu, Zifan; Xu, Jiuyi; Feng, Andrew; You, Suya; Nurunnabi, Abdul; Shi, Yangming
Large-Scale 3D Terrain Reconstruction Using 3D Gaussian Splatting for Visualization and Simulation Journal Article
In: Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci., vol. XLVIII-2-2024, pp. 49–54, 2024, ISSN: 2194-9034.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, VGL
@article{chen_large-scale_2024,
title = {Large-Scale {3D} Terrain Reconstruction Using {3D} {Gaussian} Splatting for Visualization and Simulation},
author = {Meida Chen and Devashish Lal and Zifan Yu and Jiuyi Xu and Andrew Feng and Suya You and Abdul Nurunnabi and Yangming Shi},
url = {https://isprs-archives.copernicus.org/articles/XLVIII-2-2024/49/2024/},
doi = {10.5194/isprs-archives-XLVIII-2-2024-49-2024},
issn = {2194-9034},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-20},
journal = {Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci.},
volume = {XLVIII-2-2024},
pages = {49--54},
abstract = {The fusion of low-cost unmanned aerial systems (UAS) with advanced photogrammetric techniques has revolutionized 3D terrain reconstruction, enabling the automated creation of detailed models. Concurrently, the advent of 3D Gaussian Splatting has introduced a paradigm shift in 3D data representation, offering visually realistic renditions distinct from traditional polygon-based models. Our research builds upon this foundation, aiming to integrate Gaussian Splatting into interactive simulations for immersive virtual environments. We address challenges such as collision detection by adopting a hybrid approach, combining Gaussian Splatting with photogrammetry-derived meshes. Through comprehensive experimentation covering varying terrain sizes and Gaussian densities, we evaluate scalability, performance, and limitations. Our findings contribute to advancing the use of advanced computer graphics techniques for enhanced 3D terrain visualization and simulation.},
keywords = {DTIC, Graphics, VGL},
pubstate = {published},
tppubtype = {article}
}
Saxon, Leslie; Faulk, Robert T; Boberg, Jill; Barrett, Trevor; McLelland, Steve
Continuous Assessment of Active-Duty Army Special Operations and Reconnaissance Marines Using Digital Devices and Custom Software: The Digital Comprehensive Operator Readiness Assessment (DcORA) Study Journal Article
In: J. Spec. Oper. Med., 2024, ISSN: 1553-9768.
Links | BibTeX | Tags: CBC, DTIC
@article{saxon_continuous_2024,
title = {Continuous Assessment of Active-Duty Army Special Operations and Reconnaissance Marines Using Digital Devices and Custom Software: The Digital Comprehensive Operator Readiness Assessment ({DcORA}) Study},
author = {Leslie Saxon and Robert T. Faulk and Jill Boberg and Trevor Barrett and Steve McLelland},
url = {https://www.jsomonline.org/Citations/PXKK-I23D.php},
doi = {10.55460/PXKK-I23D},
issn = {1553-9768},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-25},
journal = {J. Spec. Oper. Med.},
keywords = {CBC, DTIC},
pubstate = {published},
tppubtype = {article}
}
Yin, Yinxuan; Nayyar, Mollik; Holman, Daniel; Lucas, Gale; Holbrook, Colin; Wagner, Alan
Validation and Evacuee Modeling of Virtual Robot-guided Emergency Evacuation Experiments Miscellaneous
2024.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@misc{yin_validation_2024,
title = {Validation and Evacuee Modeling of Virtual Robot-guided Emergency Evacuation Experiments},
author = {Yinxuan Yin and Mollik Nayyar and Daniel Holman and Gale Lucas and Colin Holbrook and Alan Wagner},
url = {https://osf.io/mr78s},
doi = {10.31234/osf.io/mr78s},
year = {2024},
date = {2024-06-01},
urldate = {2024-09-17},
publisher = {Center for Open Science},
abstract = {Virtual Reality (VR) is an increasingly common tool for investigating human responses to emergency situations. Nonetheless, studies validating and comparing human subject behavior during real world emergencies to their responses in VR are notably rare, and no prior studies have validated whether human emergency responses to guidance from a robot are comparable in VR versus the real world. In the present pre-registered study, we used VR to replicate a previous robot-guided emergency evacuation study conducted in the real world and compared human subject behavior in matched physical and virtual environments. In both environments, human subjects were asked to follow a robot to a location and to then read an article. While reading, a fire alarm sounds. The robot then attempted to guide them to a distant, unfamiliar exit rather than nearby and familiar exits. We observed close correspondences between evacuee exit choice (the robot’s distant exit versus closer exits), evacuation time, and trust in the robot between the VR and physical environments. We further demonstrate that data collected in virtual reality can be used to create accurate motion models (mean error of 0.42 centimeters) predicting evacuee trajectories and locations in real life. Taken together, the results provide evidence for the ecological validity of VR approaches to studying human-robot interaction, particularly robot-guided emergency evacuation.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {misc}
}
Lu, Shuhong; Jin, Zhangyu; Rajendran, Vickram; Harari, Michal; Feng, Andrew; Melo, Celso M. De
Synthetic-to-real adaptation for complex action recognition in surveillance applications Proceedings Article
In: Manser, Kimberly E.; Melo, Celso De; Rao, Raghuveer M.; Howell, Christopher L. (Ed.): Synthetic Data for Artificial Intelligence and Machine Learning: Tools, Techniques, and Applications II, pp. 14, SPIE, National Harbor, United States, 2024, ISBN: 978-1-5106-7388-5 978-1-5106-7389-2.
Links | BibTeX | Tags: DTIC
@inproceedings{lu_synthetic-to-real_2024,
title = {Synthetic-to-real adaptation for complex action recognition in surveillance applications},
author = {Shuhong Lu and Zhangyu Jin and Vickram Rajendran and Michal Harari and Andrew Feng and Celso M. De Melo},
editor = {Kimberly E. Manser and Celso De Melo and Raghuveer M. Rao and Christopher L. Howell},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/13035/3012393/Synthetic-to-real-adaptation-for-complex-action-recognition-in-surveillance/10.1117/12.3012393.full},
doi = {10.1117/12.3012393},
isbn = {978-1-5106-7388-5 978-1-5106-7389-2},
year = {2024},
date = {2024-06-01},
urldate = {2024-07-11},
booktitle = {Synthetic Data for Artificial Intelligence and Machine Learning: Tools, Techniques, and Applications II},
pages = {14},
publisher = {SPIE},
address = {National Harbor, United States},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu HI USA, 2024, ISBN: 979-8-4007-0331-7.
Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an {AI-Enabled} Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {979-8-4007-0331-7},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1--8},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chemburkar, Ankur; Gordon, Andrew; Feng, Andrew
Evaluating Vision-Language Models on the TriangleCOPA Benchmark Journal Article
In: FLAIRS-37, vol. 37, 2024.
Abstract | BibTeX | Tags: DTIC, Narrative
@article{chemburkar_evaluating_2024,
title = {Evaluating Vision-Language Models on the {TriangleCOPA} Benchmark},
author = {Ankur Chemburkar and Andrew Gordon and Andrew Feng},
year = {2024},
date = {2024-05-01},
journal = {FLAIRS-37},
volume = {37},
abstract = {The TriangleCOPA benchmark consists of 100 textual questions with videos depicting the movements of simple shapes in the style of the classic social-psychology film created by Fritz Heider and Marianne Simmel in 1944. In our experiments, we investigate the performance of current vision-language models on this challenging benchmark, assessing the capability of these models for visual anthropomorphism and abstract interpretation.},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {article}
}