Publications
Charnsethikul, Pithayuth; Zunquti, Almajd; Lucas, Gale; Mirkovic, Jelena
Navigating Social Media Privacy: Awareness, Preferences, and Discoverability Journal Article
In: PoPETs, vol. 2025, no. 4, pp. 620–638, 2025, ISSN: 2299-0984.
@article{charnsethikul_navigating_2025,
title = {Navigating Social Media Privacy: Awareness, Preferences, and Discoverability},
author = {Pithayuth Charnsethikul and Almajd Zunquti and Gale Lucas and Jelena Mirkovic},
url = {https://petsymposium.org/popets/2025/popets-2025-0148.php},
doi = {10.56553/popets-2025-0148},
issn = {2299-0984},
year  = {2025},
date = {2025-10-01},
urldate = {2025-08-19},
journal = {PoPETs},
volume = {2025},
number = {4},
pages = {620–638},
abstract = {Social media platforms provide various privacy settings, which users can adjust to fit their privacy needs. Platforms claim that this is sufficient – users have power to accept the default settings they like, and change those they do not like. In this paper, we seek to quantify user awareness of, preferences around and ability to adjust social media privacy settings. We conduct an online survey of 541 participants across six different social media platforms: Facebook, Instagram, X, LinkedIn, TikTok, and Snapchat. We focus on nine privacy settings that are commonly available across these platforms, and evaluate participants’ preferences for privacy, awareness of the privacy settings and ability to locate them. We find that default settings are ill-aligned with user preferences – 92% of participants prefer at least one of the privacy options to be more private than the default. We further find that users are generally not aware of privacy settings, and struggle to find them. 80% of participants have never seen at least one privacy setting, and 79% of participants rated at least one setting as hard to find. We also find that the fewer privacy settings a user has seen, the harder for them to locate those settings, and the higher the level of privacy they desire. Additionally, we find that there are significant differences in privacy setting preferences and usability across different user age groups and across platforms. Older users are more conservative about their privacy, they have seen significantly fewer privacy settings, and they spend significantly more time locating them than younger users. On some platforms, like LinkedIn, users opt for higher visibility, while on others they prefer more privacy. Some platforms, like TikTok, make it significantly easier for users to locate privacy settings. Based on our findings, we provide recommendations on default values and how to improve usability of privacy settings on social media.},
keywords = {DTIC, Social},
pubstate = {published},
tppubtype = {article}
}
Kwon, Deuksin; Shrestha, Kaleen; Han, Bin; Lee, Elena Hayoung; Lucas, Gale
Evaluating Behavioral Alignment in Conflict Dialogue: A Multi-Dimensional Comparison of LLM Agents and Humans Miscellaneous
2025, (arXiv:2509.16394 [cs]).
@misc{kwon_evaluating_2025,
title = {Evaluating Behavioral Alignment in Conflict Dialogue: A Multi-Dimensional Comparison of LLM Agents and Humans},
author = {Deuksin Kwon and Kaleen Shrestha and Bin Han and Elena Hayoung Lee and Gale Lucas},
url = {http://arxiv.org/abs/2509.16394},
doi = {10.48550/arXiv.2509.16394},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-25},
publisher = {arXiv},
abstract = {Large Language Models (LLMs) are increasingly deployed in socially complex, interaction-driven tasks, yet their ability to mirror human behavior in emotionally and strategically complex contexts remains underexplored. This study assesses the behavioral alignment of personality-prompted LLMs in adversarial dispute resolution by simulating multi-turn conflict dialogues that incorporate negotiation. Each LLM is guided by a matched Five-Factor personality profile to control for individual variation and enhance realism. We evaluate alignment across three dimensions: linguistic style, emotional expression (e.g., anger dynamics), and strategic behavior. GPT-4.1 achieves the closest alignment with humans in linguistic style and emotional dynamics, while Claude-3.7-Sonnet best reflects strategic behavior. Nonetheless, substantial alignment gaps persist. Our findings establish a benchmark for alignment between LLMs and humans in socially complex interactions, underscoring both the promise and the limitations of personality conditioning in dialogue modeling.},
note = {arXiv:2509.16394 [cs]},
keywords = {AI, DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
Wang, Yunzhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Ustun, Volkan
Implicit Behavioral Alignment of Language Agents in High-Stakes Crowd Simulations Miscellaneous
2025, (arXiv:2509.16457 [cs]).
@misc{wang_implicit_2025,
title = {Implicit Behavioral Alignment of Language Agents in High-Stakes Crowd Simulations},
author = {Yunzhe Wang and Gale M. Lucas and Burcin Becerik-Gerber and Volkan Ustun},
url = {http://arxiv.org/abs/2509.16457},
doi = {10.48550/arXiv.2509.16457},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-25},
publisher = {arXiv},
abstract = {Language-driven generative agents have enabled large-scale social simulations with transformative uses, from interpersonal training to aiding global policy-making. However, recent studies indicate that generative agent behaviors often deviate from expert expectations and real-world data–a phenomenon we term the Behavior-Realism Gap. To address this, we introduce a theoretical framework called Persona-Environment Behavioral Alignment (PEBA), formulated as a distribution matching problem grounded in Lewin's behavior equation stating that behavior is a function of the person and their environment. Leveraging PEBA, we propose PersonaEvolve (PEvo), an LLM-based optimization algorithm that iteratively refines agent personas, implicitly aligning their collective behaviors with realistic expert benchmarks within a specified environmental context. We validate PEvo in an active shooter incident simulation we developed, achieving an 84% average reduction in distributional divergence compared to no steering and a 34% improvement over explicit instruction baselines. Results also show PEvo-refined personas generalize to novel, related simulation scenarios. Our method greatly enhances behavioral realism and reliability in high-stakes social simulations. More broadly, the PEBA-PEvo framework provides a principled approach to developing trustworthy LLM-driven social simulations.},
note = {arXiv:2509.16457 [cs]},
keywords = {AI, DTIC, Virtual Agents},
pubstate = {published},
tppubtype = {misc}
}
Wang, Yunzhe; Ustun, Volkan; McGroarty, Chris
A data-driven discretized CS:GO simulation environment to facilitate strategic multi-agent planning research Miscellaneous
2025, (arXiv:2509.06355 [cs]).
@misc{wang_data-driven_2025,
title = {A data-driven discretized CS:GO simulation environment to facilitate strategic multi-agent planning research},
author = {Yunzhe Wang and Volkan Ustun and Chris McGroarty},
url = {http://arxiv.org/abs/2509.06355},
doi = {10.48550/arXiv.2509.06355},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {Modern simulation environments for complex multi-agent interactions must balance high-fidelity detail with computational efficiency. We present DECOY, a novel multi-agent simulator that abstracts strategic, long-horizon planning in 3D terrains into high-level discretized simulation while preserving low-level environmental fidelity. Using Counter-Strike: Global Offensive (CS:GO) as a testbed, our framework accurately simulates gameplay using only movement decisions as tactical positioning – without explicitly modeling low-level mechanics such as aiming and shooting. Central to our approach is a waypoint system that simplifies and discretizes continuous states and actions, paired with neural predictive and generative models trained on real CS:GO tournament data to reconstruct event outcomes. Extensive evaluations show that replays generated from human data in DECOY closely match those observed in the original game. Our publicly available simulation environment provides a valuable tool for advancing research in strategic multi-agent planning and behavior generation.},
note = {arXiv:2509.06355 [cs]},
keywords = {DTIC, Simulation},
pubstate = {published},
tppubtype = {misc}
}
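The DECOY abstract above centers on a waypoint system that discretizes continuous positions and actions. As a rough illustration only (not the authors' implementation; the coordinates and helper below are hypothetical), mapping a continuous position to its nearest waypoint can be sketched in a few lines of Python:

import numpy as np

# Hypothetical waypoint coordinates on a 2D map (illustrative only).
WAYPOINTS = np.array([[0.0, 0.0], [12.0, 3.0], [6.0, 9.0], [15.0, 15.0]])

def nearest_waypoint(position):
    """Return the index of the waypoint closest to a continuous (x, y) position."""
    dists = np.linalg.norm(WAYPOINTS - np.asarray(position), axis=1)
    return int(np.argmin(dists))

print(nearest_waypoint((5.5, 8.0)))  # maps to waypoint 2 in this toy layout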
Oh, Jinwoo; Chen, Po-Yu; Hsing, Hsiang-Wen; Lau, Nathan; Wu, Peggy; Srivastava, Kunal; Gurney, Nikolos; Molinaro, Kylie; Trent, Stoney
Understanding Cybersecurity Skill Levels Through Psychological Measures: Clustering Hackers with Traits Questionnaires Journal Article
In: Proceedings of the Human Factors and Ergonomics Society Annual Meeting, pp. 10711813251371034, 2025, ISSN: 1071-1813, 2169-5067.
@article{oh_understanding_2025,
title = {Understanding Cybersecurity Skill Levels Through Psychological Measures: Clustering Hackers with Traits Questionnaires},
author = {Jinwoo Oh and Po-Yu Chen and Hsiang-Wen Hsing and Nathan Lau and Peggy Wu and Kunal Srivastava and Nikolos Gurney and Kylie Molinaro and Stoney Trent},
url = {https://journals.sagepub.com/doi/10.1177/10711813251371034},
doi = {10.1177/10711813251371034},
issn = {1071-1813, 2169-5067},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-18},
journal = {Proceedings of the Human Factors and Ergonomics Society Annual Meeting},
pages = {10711813251371034},
abstract = {In cybersecurity, performance in offensive tasks such as penetration testing or red-team exercises can be influenced by both technical skill and psychological traits. This exploratory study examines how specific psychometric characteristics relate to hacking performance in a controlled environment. Sixty-one participants who passed a cybersecurity skills test completed a two-day simulated hacking exercise and responded to psychometric questionnaires. A Random Forest analysis identified five questionnaire items—drawn from decision-making and personality measures—as the most predictive of cybersecurity skills test scores. The responses to these items were used in a k-means clustering analysis (k = 3), which revealed significant differences in skills test scores and response patterns across clusters. The findings suggest that certain psychological traits may serve as auxiliary indicators of cybersecurity skill. Further research could explore this relationship using aggregated trait-level metrics and broader participant samples, including professional red-teamers, to examine the robustness of these preliminary findings in more ecologically valid settings.},
keywords = {DTIC, Security},
pubstate = {published},
tppubtype = {article}
}
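The clustering step described in the abstract above (k-means with k = 3 over responses to a handful of predictive questionnaire items) can be illustrated with a minimal Python sketch; the item responses below are made-up placeholders, not the study's data:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

# Placeholder matrix: rows = participants, columns = the five most predictive items.
responses = np.array([
    [4, 2, 5, 3, 1],
    [1, 5, 2, 4, 3],
    [3, 3, 4, 2, 2],
    [5, 1, 4, 3, 2],
    [2, 4, 1, 5, 4],
    [3, 2, 3, 3, 1],
])

scaled = StandardScaler().fit_transform(responses)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(scaled)
print(labels)  # one cluster assignment (0, 1, or 2) per participant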
Liu, Ruying; Wu, Wanjing; Becerik-Gerber, Burcin; Lucas, Gale M.; Laboy, Michelle; Fannon, David
Reinforcement learning for evaluating school safety designs in active shooter incidents Journal Article
In: Advanced Engineering Informatics, vol. 67, pp. 103575, 2025, ISSN: 14740346.
@article{liu_reinforcement_2025,
title = {Reinforcement learning for evaluating school safety designs in active shooter incidents},
author = {Ruying Liu and Wanjing Wu and Burcin Becerik-Gerber and Gale M. Lucas and Michelle Laboy and David Fannon},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1474034625004689},
doi = {10.1016/j.aei.2025.103575},
issn = {14740346},
year  = {2025},
date = {2025-09-01},
urldate = {2025-08-19},
journal = {Advanced Engineering Informatics},
volume = {67},
pages = {103575},
keywords = {DTIC, Simulation},
pubstate = {published},
tppubtype = {article}
}
Gomez-Zaragoza, Lucia; Marin-Morales, Javier; Alcaniz, Mariano; Soleymani, Mohammed
Speech and Text Foundation Models for Depression Detection: Cross-Task and Cross-Language Evaluation Proceedings Article
In: Rotterdam, The Netherlands, 2025.
@inproceedings{gomez-zaragoza_speech_2025,
title = {Speech and Text Foundation Models for Depression Detection: Cross-Task and Cross-Language Evaluation},
author = {Lucia Gomez-Zaragoza and Javier Marin-Morales and Mariano Alcaniz and Mohammed Soleymani},
url = {https://www.isca-archive.org/interspeech_2025/gomezzaragoza25_interspeech.html#},
year  = {2025},
date = {2025-08-01},
address = {Rotterdam, The Netherlands},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Rakshit, Sushrita; Hale, James; Chawla, Kushal; Brett, Jeanne M.; Gratch, Jonathan
Emotionally-Aware Agents for Dispute Resolution Miscellaneous
2025, (arXiv:2509.04465 [cs]).
@misc{rakshit_emotionally-aware_2025,
title = {Emotionally-Aware Agents for Dispute Resolution},
author = {Sushrita Rakshit and James Hale and Kushal Chawla and Jeanne M. Brett and Jonathan Gratch},
url = {http://arxiv.org/abs/2509.04465},
doi = {10.48550/arXiv.2509.04465},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {In conflict, people use emotional expressions to shape their counterparts' thoughts, feelings, and actions. This paper explores whether automatic text emotion recognition offers insight into this influence in the context of dispute resolution. Prior work has shown the promise of such methods in negotiations; however, disputes evoke stronger emotions and different social processes. We use a large corpus of buyer-seller dispute dialogues to investigate how emotional expressions shape subjective and objective outcomes. We further demonstrate that large-language models yield considerably greater explanatory power than previous methods for emotion intensity annotation and better match the decisions of human annotators. Findings support existing theoretical models for how emotional expressions contribute to conflict escalation and resolution and suggest that agent-based systems could be useful in managing disputes by recognizing and potentially mitigating emotional escalation.},
note = {arXiv:2509.04465 [cs]},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Beltz, Brandon; Doty, Jim; Fonken, Yvonne; Gurney, Nikolos; Israelsen, Brett; Lau, Nathan; Marsella, Stacy; Thomas, Rachelle; Trent, Stoney; Wu, Peggy; Yang, Ya-Ting; Zhu, Quanyan
Guarding Against Malicious Biased Threats (GAMBiT) Experiments: Revealing Cognitive Bias in Human-Subjects Red-Team Cyber Range Operations Miscellaneous
2025, (arXiv:2508.20963 [cs]).
@misc{beltz_guarding_2025,
title = {Guarding Against Malicious Biased Threats (GAMBiT) Experiments: Revealing Cognitive Bias in Human-Subjects Red-Team Cyber Range Operations},
author = {Brandon Beltz and Jim Doty and Yvonne Fonken and Nikolos Gurney and Brett Israelsen and Nathan Lau and Stacy Marsella and Rachelle Thomas and Stoney Trent and Peggy Wu and Ya-Ting Yang and Quanyan Zhu},
url = {http://arxiv.org/abs/2508.20963},
doi = {10.48550/arXiv.2508.20963},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {We present three large-scale human-subjects red-team cyber range datasets from the Guarding Against Malicious Biased Threats (GAMBiT) project. Across Experiments 1-3 (July 2024-March 2025), 19-20 skilled attackers per experiment conducted two 8-hour days of self-paced operations in a simulated enterprise network (SimSpace Cyber Force Platform) while we captured multi-modal data: self-reports (background, demographics, psychometrics), operational notes, terminal histories, keylogs, network packet captures (PCAP), and NIDS alerts (Suricata). Each participant began from a standardized Kali Linux VM and pursued realistic objectives (e.g., target discovery and data exfiltration) under controlled constraints. Derivative curated logs and labels are included. The combined release supports research on attacker behavior modeling, bias-aware analytics, and method benchmarking. Data are available via IEEE Dataport entries for Experiments 1-3.},
note = {arXiv:2508.20963 [cs]},
keywords = {DTIC, Security},
pubstate = {published},
tppubtype = {misc}
}
Chen, Meida; Leal, Luis; Hu, Yue; Liu, Rong; Xiong, Butian; Feng, Andrew; Xu, Jiuyi; Shi, Yangming
IDU: Incremental Dynamic Update of Existing 3D Virtual Environments with New Imagery Data Miscellaneous
2025, (arXiv:2508.17579 [cs]).
@misc{chen_idu_2025,
title = {IDU: Incremental Dynamic Update of Existing 3D Virtual Environments with New Imagery Data},
author = {Meida Chen and Luis Leal and Yue Hu and Rong Liu and Butian Xiong and Andrew Feng and Jiuyi Xu and Yangming Shi},
url = {http://arxiv.org/abs/2508.17579},
doi = {10.48550/arXiv.2508.17579},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {For simulation and training purposes, military organizations have made substantial investments in developing high-resolution 3D virtual environments through extensive imaging and 3D scanning. However, the dynamic nature of battlefield conditions-where objects may appear or vanish over time-makes frequent full-scale updates both time-consuming and costly. In response, we introduce the Incremental Dynamic Update (IDU) pipeline, which efficiently updates existing 3D reconstructions, such as 3D Gaussian Splatting (3DGS), with only a small set of newly acquired images. Our approach starts with camera pose estimation to align new images with the existing 3D model, followed by change detection to pinpoint modifications in the scene. A 3D generative AI model is then used to create high-quality 3D assets of the new elements, which are seamlessly integrated into the existing 3D model. The IDU pipeline incorporates human guidance to ensure high accuracy in object identification and placement, with each update focusing on a single new object at a time. Experimental results confirm that our proposed IDU pipeline significantly reduces update time and labor, offering a cost-effective and targeted solution for maintaining up-to-date 3D models in rapidly evolving military scenarios.},
note = {arXiv:2508.17579 [cs]},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {misc}
}
Hans, Soham; Gurney, Nikolos; Marsella, Stacy; Hirschmann, Sofia
Quantifying Loss Aversion in Cyber Adversaries via LLM Analysis Miscellaneous
2025, (arXiv:2508.13240 [cs]).
@misc{hans_quantifying_2025,
title = {Quantifying Loss Aversion in Cyber Adversaries via LLM Analysis},
author = {Soham Hans and Nikolos Gurney and Stacy Marsella and Sofia Hirschmann},
url = {http://arxiv.org/abs/2508.13240},
doi = {10.48550/arXiv.2508.13240},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {Understanding and quantifying human cognitive biases from empirical data has long posed a formidable challenge, particularly in cybersecurity, where defending against unknown adversaries is paramount. Traditional cyber defense strategies have largely focused on fortification, while some approaches attempt to anticipate attacker strategies by mapping them to cognitive vulnerabilities, yet they fall short in dynamically interpreting attacks in progress. In recognition of this gap, IARPA's ReSCIND program seeks to infer, defend against, and even exploit attacker cognitive traits. In this paper, we present a novel methodology that leverages large language models (LLMs) to extract quantifiable insights into the cognitive bias of loss aversion from hacker behavior. Our data are collected from an experiment in which hackers were recruited to attack a controlled demonstration network. We process the hacker generated notes using LLMs using it to segment the various actions and correlate the actions to predefined persistence mechanisms used by hackers. By correlating the implementation of these mechanisms with various operational triggers, our analysis provides new insights into how loss aversion manifests in hacker decision-making. The results demonstrate that LLMs can effectively dissect and interpret nuanced behavioral patterns, thereby offering a transformative approach to enhancing cyber defense strategies through real-time, behavior-based analysis.},
note = {arXiv:2508.13240 [cs]},
keywords = {DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
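The abstract above describes using an LLM to segment hackers' free-text notes into discrete actions and map each onto a predefined persistence mechanism. A minimal sketch of that kind of pass, assuming the OpenAI Python client and an illustrative prompt, label set, and model name (not the authors' actual pipeline):

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Illustrative label set; the actual persistence mechanisms used in the study may differ.
MECHANISMS = ["scheduled task", "new user account", "ssh authorized key", "web shell", "none"]

def tag_note(note: str) -> str:
    prompt = (
        "Split the following red-team operator note into individual actions, one per line, "
        f"and label each action with one mechanism from this list: {MECHANISMS}.\n\n"
        f"Note:\n{note}"
    )
    resp = client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative model choice
        messages=[{"role": "user", "content": prompt}],
    )
    return resp.choices[0].message.content

print(tag_note("Added a cron job for a reverse shell, then created a backup admin account."))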
Wang, Zihao; Rodrigues, Patrick Borges; Fang, Yiyang; Soibelman, Lucio; Becerik-Gerber, Burcin; Roll, Shawn C.; Lucas, Gale M
Understanding Potential Challenges in Demolition Robot Teleoperation to Inform Interface Design: Insights from Industry Professionals Journal Article
In: CIB Conferences, vol. 1, no. 1, 2025, ISSN: 3067-4883.
@article{wang_understanding_2025,
title = {Understanding Potential Challenges in Demolition Robot Teleoperation to Inform Interface Design: Insights from Industry Professionals},
author = {Zihao Wang and Patrick Borges Rodrigues and Yiyang Fang and Lucio Soibelman and Burcin Becerik-Gerber and Shawn C. Roll and Gale M Lucas},
url = {https://docs.lib.purdue.edu/cib-conferences/vol1/iss1/106},
doi = {10.7771/3067-4883.2040},
issn = {3067-4883},
year  = {2025},
date = {2025-06-01},
urldate = {2025-08-19},
journal = {CIB Conferences},
volume = {1},
number = {1},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices Journal Article
In: Journal of Choice Modelling, vol. 55, pp. 100549, 2025, ISSN: 17555345.
@article{gurney_exploring_2025,
title = {Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1755534525000120},
doi = {10.1016/j.jocm.2025.100549},
issn = {17555345},
year  = {2025},
date = {2025-06-01},
urldate = {2025-04-15},
journal = {Journal of Choice Modelling},
volume = {55},
pages = {100549},
keywords = {DTIC, Social},
pubstate = {published},
tppubtype = {article}
}
Klumpe, Stella; Mitchell, Kelsey C.; Cox, Emma; Katz, Jeffrey S.; Lazarowski, Lucia; Deshpande, Gopikrishna; Gratch, Jonathan; Visser, Ewart J. De; Ayaz, Hasan; Li, Xingnan; Franke, Adrian A.; Krueger, Frank
Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions Journal Article
In: PLoS One, vol. 20, no. 6, pp. e0324312, 2025, ISSN: 1932-6203.
@article{klumpe_social_2025,
title = {Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions},
author = {Stella Klumpe and Kelsey C. Mitchell and Emma Cox and Jeffrey S. Katz and Lucia Lazarowski and Gopikrishna Deshpande and Jonathan Gratch and Ewart J. De Visser and Hasan Ayaz and Xingnan Li and Adrian A. Franke and Frank Krueger},
editor = {Casey R. Lynch},
url = {https://dx.plos.org/10.1371/journal.pone.0324312},
doi = {10.1371/journal.pone.0324312},
issn = {1932-6203},
year  = {2025},
date = {2025-06-01},
urldate = {2025-06-12},
journal = {PLoS One},
volume = {20},
number = {6},
pages = {e0324312},
abstract = {In the evolving landscape of technology, robots have emerged as social companions, prompting an investigation into social bonding between humans and robots. While human-animal interactions are well-studied, human-robot interactions (HRI) remain comparatively underexplored. Ethorobotics, a field of social robotic engineering based on ecology and ethology, suggests designing companion robots modeled on animal companions, which are simpler to emulate than humans. However, it is unclear whether these robots can match the social companionship provided by their original models. This study examined social bonding between humans and AIBOs, dog-inspired companion robots, compared to real dogs. Nineteen female participants engaged in 12 affiliative interactions with dogs and AIBOs across two counter-balanced, one-month bonding phases. Social bonding was assessed through urinary oxytocin (OXT) level change over an interaction, self-reported attachment using an adapted version of the Lexington Attachment to Pets Scale, and social companionship evaluations administering the Robot-Dog Questionnaire. To examine OXT level changes and self-reported attachment by comparing the two social companions, we conducted mixed-effects model analyses and planned follow-up comparisons. Frequency comparison, binary logistic regression, and thematic analysis were performed to analyze social companionship evaluations. Results revealed significant differences between dogs and AIBOs in fostering social bonds. OXT level change increased during interactions with dogs but decreased with AIBOs. Participants reported stronger attachment to dogs and rated them as better social companions. These findings highlight the current limitations of AIBOs in fostering social bonding immediately compared to dogs. Our study contributes to the growing HRI research by demonstrating an existing gap between AIBOs and dogs as social companions. It highlights the need for further investigation to understand the complexities of social bonding with companion robots, which is essential to implement successful applications for social robots in diverse domains such as the elderly and health care, education, and entertainment.},
keywords = {DTIC, Virtual Agents, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
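The mixed-effects comparison described in the abstract above (oxytocin change modeled across companion type, with participant as a grouping factor) could be run along these lines in Python with statsmodels; the column names and values are placeholders, not the study's data:

import pandas as pd
import statsmodels.formula.api as smf

# Placeholder data: one oxytocin-change value per participant x companion pairing.
df = pd.DataFrame({
    "participant": [1, 1, 2, 2, 3, 3, 4, 4],
    "companion":   ["dog", "aibo"] * 4,
    "oxt_change":  [0.12, -0.05, 0.30, -0.10, 0.08, -0.02, 0.21, -0.07],
})

# Random intercept per participant; companion type as the fixed effect of interest.
model = smf.mixedlm("oxt_change ~ companion", data=df, groups=df["participant"])
result = model.fit()
print(result.summary())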
Hale, James; Kim, HanMoe; Choi, Ahyoung; Gratch, Jonathan
AI-Mediated Dispute Resolution Journal Article
In: AAAI-SS, vol. 5, no. 1, pp. 67–70, 2025, ISSN: 2994-4317.
@article{hale_ai-mediated_2025,
title = {AI-Mediated Dispute Resolution},
author = {James Hale and HanMoe Kim and Ahyoung Choi and Jonathan Gratch},
url = {https://ojs.aaai.org/index.php/AAAI-SS/article/view/35558},
doi = {10.1609/aaaiss.v5i1.35558},
issn = {2994-4317},
year  = {2025},
date = {2025-05-01},
urldate = {2025-08-19},
journal = {AAAI-SS},
volume = {5},
number = {1},
pages = {67–70},
abstract = {We examine the effectiveness of large language model (LLM) mediations in the under-studied dispute resolution domain. We first used a new corpus of dispute resolutions, KODIS, to investigate if LLMs can correctly identify whether to intervene. We find evidence that GPT as a mediator picks up on salient aspects of a dispute, such as Frustration and whether the disputants ultimately come to a resolution or stall at an impasse — intervening significantly more so in cases of high frustration and impasse. Afterward, we ran a user study to compare GPT mediations against those of novice human mediators. We find participants agreed GPT's mediations were more likely to lead to resolution; were better positioned in the dialog; had better justification than human-crafted ones; and, on a forced choice, were generally more effective than novice human mediations.},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {article}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Journal Article
In: Int J Artif Intell Educ, 2025, ISSN: 1560-4292, 1560-4306.
@article{okado_how_2025,
title = {How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
url = {https://link.springer.com/10.1007/s40593-025-00482-w},
doi = {10.1007/s40593-025-00482-w},
issn = {1560-4292, 1560-4306},
year  = {2025},
date = {2025-05-01},
urldate = {2025-06-24},
journal = {Int J Artif Intell Educ},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {article}
}
Core, Mark; Nye, Benjamin; Carr, Kayla; Li, Shirley; Shiel, Aaron; Auerbach, Daniel; Leeds, Andrew; Swartout, William
Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling Journal Article
In: FLAIRS, vol. 38, 2025, ISSN: 2334-0762, 2334-0754.
@article{core_usability_2025,
title = {Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling},
author = {Mark Core and Benjamin Nye and Kayla Carr and Shirley Li and Aaron Shiel and Daniel Auerbach and Andrew Leeds and William Swartout},
url = {https://journals.flvc.org/FLAIRS/article/view/138996},
doi = {10.32473/flairs.38.1.138996},
issn = {2334-0762, 2334-0754},
year  = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {FLAIRS},
volume = {38},
abstract = {As AI tools become common across jobs and industries, it is critical to broaden education about AI beyond teaching computer scientists how to build AI systems. To expand AI education, we are researching AI for AI learning: a personalized and adaptive learning system that integrates dialog-based tutoring and gamified programming activities. To study this problem, we adapted and expanded an existing smartphone adaptive coach to develop the Game-if-AI system. Using a design-based research approach, Game-if-AI was iteratively tested and improved across four semesters of optional use in a course designed for technician-level understanding of AI: mastering programming skills to apply AI libraries and established models. In this study, we measured the interests and needs of these technical learners, based on both survey data and on how they engaged with topics in the system. Based on this data, new topics were added and the system was refined. In this paper, we report students' usability ratings for system components and student preferences based on completion rates of AI topics available each semester. Students rated the adaptive system positively overall (93% rated as a "good idea"), but more complex learning activities (tutoring dialogs, programming) were rated lower than traditional ones (e.g., multiple choice, reading). Students were most likely to master topics highly aligned to the course materials, as well as self-directed learning toward easier high-interest topics (e.g., LLM Prompting).},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Fu, Boxi; Dincer, Betul; Masur, Omkar; Faizi, David; Ravindran, Harshul; Wang, Julia; Lai, Devashish; Merchant, Chirag
Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners Book Section
In: Smith, Brian K.; Borge, Marcela (Ed.): Learning and Collaboration Technologies, vol. 15808, pp. 69–79, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-93745-3 978-3-031-93746-0, (Series Title: Lecture Notes in Computer Science).
@incollection{smith_becoming_2025,
title = {Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners},
author = {Ning Wang and Boxi Fu and Betul Dincer and Omkar Masur and David Faizi and Harshul Ravindran and Julia Wang and Devashish Lai and Chirag Merchant},
editor = {Brian K. Smith and Marcela Borge},
url = {https://link.springer.com/10.1007/978-3-031-93746-0_6},
doi = {10.1007/978-3-031-93746-0_6},
isbn = {978-3-031-93745-3 978-3-031-93746-0},
year  = {2025},
date = {2025-05-01},
urldate = {2025-06-12},
booktitle = {Learning and Collaboration Technologies},
volume = {15808},
pages = {69–79},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration Journal Article
In: Journal of Environmental Psychology, pp. 102628, 2025, ISSN: 02724944.
@article{awada_impact_2025,
title = {The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration},
author = {Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494425001112},
doi = {10.1016/j.jenvp.2025.102628},
issn = {02724944},
year  = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {Journal of Environmental Psychology},
pages = {102628},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew
Logical Abduction as a Computational Model of Narrative Proceedings Article
In: Geneva, Switzerland, 2025.
@inproceedings{gordon_andrew_logical_2025,
title = {Logical Abduction as a Computational Model of Narrative},
author = {Andrew Gordon},
url = {https://asgordon.github.io/publications/CMN2025.PDF},
year  = {2025},
date = {2025-05-01},
address = {Geneva, Switzerland},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2025
Charnsethikul, Pithayuth; Zunquti, Almajd; Lucas, Gale; Mirkovic, Jelena
Navigating Social Media Privacy: Awareness, Preferences, and Discoverability Journal Article
In: PoPETs, vol. 2025, no. 4, pp. 620–638, 2025, ISSN: 2299-0984.
Abstract | Links | BibTeX | Tags: DTIC, Social
@article{charnsethikul_navigating_2025,
title = {Navigating Social Media Privacy: Awareness, Preferences, and Discoverability},
author = {Pithayuth Charnsethikul and Almajd Zunquti and Gale Lucas and Jelena Mirkovic},
url = {https://petsymposium.org/popets/2025/popets-2025-0148.php},
doi = {10.56553/popets-2025-0148},
issn = {2299-0984},
year  = {2025},
date = {2025-10-01},
urldate = {2025-08-19},
journal = {PoPETs},
volume = {2025},
number = {4},
pages = {620–638},
abstract = {Social media platforms provide various privacy settings, which users can adjust to fit their privacy needs. Platforms claim that this is sufficient – users have power to accept the default settings they like, and change those they do not like. In this paper, we seek to quantify user awareness of, preferences around and ability to adjust social media privacy settings. We conduct an online survey of 541 participants across six different social media platforms: Facebook, Instagram, X, LinkedIn, TikTok, and Snapchat. We focus on nine privacy settings that are commonly available across these platforms, and evaluate participants’ preferences for privacy, awareness of the privacy settings and ability to locate them. We find that default settings are ill-aligned with user preferences – 92% of participants prefer at least one of the privacy options to be more private than the default. We further find that users are generally not aware of privacy settings, and struggle to find them. 80% of participants have never seen at least one privacy setting, and 79% of participants rated at least one setting as hard to find. We also find that the fewer privacy settings a user has seen, the harder for them to locate those settings, and the higher the level of privacy they desire. Additionally, we find that there are significant differences in privacy setting preferences and usability across different user age groups and across platforms. Older users are more conservative about their privacy, they have seen significantly fewer privacy settings, and they spend significantly more time locating them than younger users. On some platforms, like LinkedIn, users opt for higher visibility, while on others they prefer more privacy. Some platforms, like TikTok, make it significantly easier for users to locate privacy settings. Based on our findings, we provide recommendations on default values and how to improve usability of privacy settings on social media.},
keywords = {DTIC, Social},
pubstate = {published},
tppubtype = {article}
}
Kwon, Deuksin; Shrestha, Kaleen; Han, Bin; Lee, Elena Hayoung; Lucas, Gale
Evaluating Behavioral Alignment in Conflict Dialogue: A Multi-Dimensional Comparison of LLM Agents and Humans Miscellaneous
2025, (arXiv:2509.16394 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, LLM
@misc{kwon_evaluating_2025,
title = {Evaluating Behavioral Alignment in Conflict Dialogue: A Multi-Dimensional Comparison of LLM Agents and Humans},
author = {Deuksin Kwon and Kaleen Shrestha and Bin Han and Elena Hayoung Lee and Gale Lucas},
url = {http://arxiv.org/abs/2509.16394},
doi = {10.48550/arXiv.2509.16394},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-25},
publisher = {arXiv},
abstract = {Large Language Models (LLMs) are increasingly deployed in socially complex, interaction-driven tasks, yet their ability to mirror human behavior in emotionally and strategically complex contexts remains underexplored. This study assesses the behavioral alignment of personality-prompted LLMs in adversarial dispute resolution by simulating multi-turn conflict dialogues that incorporate negotiation. Each LLM is guided by a matched Five-Factor personality profile to control for individual variation and enhance realism. We evaluate alignment across three dimensions: linguistic style, emotional expression (e.g., anger dynamics), and strategic behavior. GPT-4.1 achieves the closest alignment with humans in linguistic style and emotional dynamics, while Claude-3.7-Sonnet best reflects strategic behavior. Nonetheless, substantial alignment gaps persist. Our findings establish a benchmark for alignment between LLMs and humans in socially complex interactions, underscoring both the promise and the limitations of personality conditioning in dialogue modeling.},
note = {arXiv:2509.16394 [cs]},
keywords = {AI, DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
Wang, Yunzhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Ustun, Volkan
Implicit Behavioral Alignment of Language Agents in High-Stakes Crowd Simulations Miscellaneous
2025, (arXiv:2509.16457 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, Virtual Agents
@misc{wang_implicit_2025,
title = {Implicit Behavioral Alignment of Language Agents in High-Stakes Crowd Simulations},
author = {Yunzhe Wang and Gale M. Lucas and Burcin Becerik-Gerber and Volkan Ustun},
url = {http://arxiv.org/abs/2509.16457},
doi = {10.48550/arXiv.2509.16457},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-25},
publisher = {arXiv},
abstract = {Language-driven generative agents have enabled large-scale social simulations with transformative uses, from interpersonal training to aiding global policy-making. However, recent studies indicate that generative agent behaviors often deviate from expert expectations and real-world data–a phenomenon we term the Behavior-Realism Gap. To address this, we introduce a theoretical framework called Persona-Environment Behavioral Alignment (PEBA), formulated as a distribution matching problem grounded in Lewin's behavior equation stating that behavior is a function of the person and their environment. Leveraging PEBA, we propose PersonaEvolve (PEvo), an LLM-based optimization algorithm that iteratively refines agent personas, implicitly aligning their collective behaviors with realistic expert benchmarks within a specified environmental context. We validate PEvo in an active shooter incident simulation we developed, achieving an 84% average reduction in distributional divergence compared to no steering and a 34% improvement over explicit instruction baselines. Results also show PEvo-refined personas generalize to novel, related simulation scenarios. Our method greatly enhances behavioral realism and reliability in high-stakes social simulations. More broadly, the PEBA-PEvo framework provides a principled approach to developing trustworthy LLM-driven social simulations.},
note = {arXiv:2509.16457 [cs]},
keywords = {AI, DTIC, Virtual Agents},
pubstate = {published},
tppubtype = {misc}
}
Wang, Yunzhe; Ustun, Volkan; McGroarty, Chris
A data-driven discretized CS:GO simulation environment to facilitate strategic multi-agent planning research Miscellaneous
2025, (arXiv:2509.06355 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Simulation
@misc{wang_data-driven_2025,
title = {A data-driven discretized CS:GO simulation environment to facilitate strategic multi-agent planning research},
author = {Yunzhe Wang and Volkan Ustun and Chris McGroarty},
url = {http://arxiv.org/abs/2509.06355},
doi = {10.48550/arXiv.2509.06355},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {Modern simulation environments for complex multi-agent interactions must balance high-fidelity detail with computational efficiency. We present DECOY, a novel multi-agent simulator that abstracts strategic, long-horizon planning in 3D terrains into high-level discretized simulation while preserving low-level environmental fidelity. Using Counter-Strike: Global Offensive (CS:GO) as a testbed, our framework accurately simulates gameplay using only movement decisions as tactical positioning – without explicitly modeling low-level mechanics such as aiming and shooting. Central to our approach is a waypoint system that simplifies and discretizes continuous states and actions, paired with neural predictive and generative models trained on real CS:GO tournament data to reconstruct event outcomes. Extensive evaluations show that replays generated from human data in DECOY closely match those observed in the original game. Our publicly available simulation environment provides a valuable tool for advancing research in strategic multi-agent planning and behavior generation.},
note = {arXiv:2509.06355 [cs]},
keywords = {DTIC, Simulation},
pubstate = {published},
tppubtype = {misc}
}
Oh, Jinwoo; Chen, Po-Yu; Hsing, Hsiang-Wen; Lau, Nathan; Wu, Peggy; Srivastava, Kunal; Gurney, Nikolos; Molinaro, Kylie; Trent, Stoney
Understanding Cybersecurity Skill Levels Through Psychological Measures: Clustering Hackers with Traits Questionnaires Journal Article
In: Proceedings of the Human Factors and Ergonomics Society Annual Meeting, pp. 10711813251371034, 2025, ISSN: 1071-1813, 2169-5067.
Abstract | Links | BibTeX | Tags: DTIC, Security
@article{oh_understanding_2025,
title = {Understanding Cybersecurity Skill Levels Through Psychological Measures: Clustering Hackers with Traits Questionnaires},
author = {Jinwoo Oh and Po-Yu Chen and Hsiang-Wen Hsing and Nathan Lau and Peggy Wu and Kunal Srivastava and Nikolos Gurney and Kylie Molinaro and Stoney Trent},
url = {https://journals.sagepub.com/doi/10.1177/10711813251371034},
doi = {10.1177/10711813251371034},
issn = {1071-1813, 2169-5067},
year  = {2025},
date = {2025-09-01},
urldate = {2025-09-18},
journal = {Proceedings of the Human Factors and Ergonomics Society Annual Meeting},
pages = {10711813251371034},
abstract = {In cybersecurity, performance in offensive tasks such as penetration testing or red-team exercises can be influenced by both technical skill and psychological traits. This exploratory study examines how specific psychometric characteristics relate to hacking performance in a controlled environment. Sixty-one participants who passed a cybersecurity skills test completed a two-day simulated hacking exercise and responded to psychometric questionnaires. A Random Forest analysis identified five questionnaire items—drawn from decision-making and personality measures—as the most predictive of cybersecurity skills test scores. The responses to these items were used in a k-means clustering analysis ( 
 k = 3), which revealed significant differences in skills test scores and response patterns across clusters. The findings suggest that certain psychological traits may serve as auxiliary indicators of cybersecurity skill. Further research could explore this relationship using aggregated trait-level metrics and broader participant samples, including professional red-teamers, to examine the robustness of these preliminary findings in more ecologically valid settings.},
keywords = {DTIC, Security},
pubstate = {published},
tppubtype = {article}
}
k = 3), which revealed significant differences in skills test scores and response patterns across clusters. The findings suggest that certain psychological traits may serve as auxiliary indicators of cybersecurity skill. Further research could explore this relationship using aggregated trait-level metrics and broader participant samples, including professional red-teamers, to examine the robustness of these preliminary findings in more ecologically valid settings.
Liu, Ruying; Wu, Wanjing; Becerik-Gerber, Burcin; Lucas, Gale M.; Laboy, Michelle; Fannon, David
Reinforcement learning for evaluating school safety designs in active shooter incidents Journal Article
In: Advanced Engineering Informatics, vol. 67, pp. 103575, 2025, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Simulation
@article{liu_reinforcement_2025,
title = {Reinforcement learning for evaluating school safety designs in active shooter incidents},
author = {Ruying Liu and Wanjing Wu and Burcin Becerik-Gerber and Gale M. Lucas and Michelle Laboy and David Fannon},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1474034625004689},
doi = {10.1016/j.aei.2025.103575},
issn = {14740346},
year  = {2025},
date = {2025-09-01},
urldate = {2025-08-19},
journal = {Advanced Engineering Informatics},
volume = {67},
pages = {103575},
keywords = {DTIC, Simulation},
pubstate = {published},
tppubtype = {article}
}
Gomez-Zaragoza, Lucia; Marin-Morales, Javier; Alcaniz, Mariano; Soleymani, Mohammed
Speech and Text Foundation Models for Depression Detection: Cross-Task and Cross-Language Evaluation Proceedings Article
In: Rotterdam, The Netherlands, 2025.
@inproceedings{gomez-zaragoza_speech_2025,
title = {Speech and Text Foundation Models for Depression Detection: Cross-Task and Cross-Language Evaluation},
author = {Lucia Gomez-Zaragoza and Javier Marin-Morales and Mariano Alcaniz and Mohammed Soleymani},
url = {https://www.isca-archive.org/interspeech_2025/gomezzaragoza25_interspeech.html#},
year  = {2025},
date = {2025-08-01},
address = {Rotterdam, The Netherlands},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Rakshit, Sushrita; Hale, James; Chawla, Kushal; Brett, Jeanne M.; Gratch, Jonathan
Emotionally-Aware Agents for Dispute Resolution Miscellaneous
2025, (arXiv:2509.04465 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC
@misc{rakshit_emotionally-aware_2025,
title = {Emotionally-Aware Agents for Dispute Resolution},
author = {Sushrita Rakshit and James Hale and Kushal Chawla and Jeanne M. Brett and Jonathan Gratch},
url = {http://arxiv.org/abs/2509.04465},
doi = {10.48550/arXiv.2509.04465},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {In conflict, people use emotional expressions to shape their counterparts' thoughts, feelings, and actions. This paper explores whether automatic text emotion recognition offers insight into this influence in the context of dispute resolution. Prior work has shown the promise of such methods in negotiations; however, disputes evoke stronger emotions and different social processes. We use a large corpus of buyer-seller dispute dialogues to investigate how emotional expressions shape subjective and objective outcomes. We further demonstrate that large-language models yield considerably greater explanatory power than previous methods for emotion intensity annotation and better match the decisions of human annotators. Findings support existing theoretical models for how emotional expressions contribute to conflict escalation and resolution and suggest that agent-based systems could be useful in managing disputes by recognizing and potentially mitigating emotional escalation.},
note = {arXiv:2509.04465 [cs]},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Beltz, Brandon; Doty, Jim; Fonken, Yvonne; Gurney, Nikolos; Israelsen, Brett; Lau, Nathan; Marsella, Stacy; Thomas, Rachelle; Trent, Stoney; Wu, Peggy; Yang, Ya-Ting; Zhu, Quanyan
2025, (arXiv:2508.20963 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Security
@misc{beltz_guarding_2025,
title = {Guarding Against Malicious Biased Threats (GAMBiT) Experiments: Revealing Cognitive Bias in Human-Subjects Red-Team Cyber Range Operations},
author = {Brandon Beltz and Jim Doty and Yvonne Fonken and Nikolos Gurney and Brett Israelsen and Nathan Lau and Stacy Marsella and Rachelle Thomas and Stoney Trent and Peggy Wu and Ya-Ting Yang and Quanyan Zhu},
url = {http://arxiv.org/abs/2508.20963},
doi = {10.48550/arXiv.2508.20963},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {We present three large-scale human-subjects red-team cyber range datasets from the Guarding Against Malicious Biased Threats (GAMBiT) project. Across Experiments 1-3 (July 2024-March 2025), 19-20 skilled attackers per experiment conducted two 8-hour days of self-paced operations in a simulated enterprise network (SimSpace Cyber Force Platform) while we captured multi-modal data: self-reports (background, demographics, psychometrics), operational notes, terminal histories, keylogs, network packet captures (PCAP), and NIDS alerts (Suricata). Each participant began from a standardized Kali Linux VM and pursued realistic objectives (e.g., target discovery and data exfiltration) under controlled constraints. Derivative curated logs and labels are included. The combined release supports research on attacker behavior modeling, bias-aware analytics, and method benchmarking. Data are available via IEEE Dataport entries for Experiments 1-3.},
note = {arXiv:2508.20963 [cs]},
keywords = {DTIC, Security},
pubstate = {published},
tppubtype = {misc}
}
Chen, Meida; Leal, Luis; Hu, Yue; Liu, Rong; Xiong, Butian; Feng, Andrew; Xu, Jiuyi; Shi, Yangming
IDU: Incremental Dynamic Update of Existing 3D Virtual Environments with New Imagery Data Miscellaneous
2025, (arXiv:2508.17579 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, VGL
@misc{chen_idu_2025,
title = {IDU: Incremental Dynamic Update of Existing 3D Virtual Environments with New Imagery Data},
author = {Meida Chen and Luis Leal and Yue Hu and Rong Liu and Butian Xiong and Andrew Feng and Jiuyi Xu and Yangming Shi},
url = {http://arxiv.org/abs/2508.17579},
doi = {10.48550/arXiv.2508.17579},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {For simulation and training purposes, military organizations have made substantial investments in developing high-resolution 3D virtual environments through extensive imaging and 3D scanning. However, the dynamic nature of battlefield conditions-where objects may appear or vanish over time-makes frequent full-scale updates both time-consuming and costly. In response, we introduce the Incremental Dynamic Update (IDU) pipeline, which efficiently updates existing 3D reconstructions, such as 3D Gaussian Splatting (3DGS), with only a small set of newly acquired images. Our approach starts with camera pose estimation to align new images with the existing 3D model, followed by change detection to pinpoint modifications in the scene. A 3D generative AI model is then used to create high-quality 3D assets of the new elements, which are seamlessly integrated into the existing 3D model. The IDU pipeline incorporates human guidance to ensure high accuracy in object identification and placement, with each update focusing on a single new object at a time. Experimental results confirm that our proposed IDU pipeline significantly reduces update time and labor, offering a cost-effective and targeted solution for maintaining up-to-date 3D models in rapidly evolving military scenarios.},
note = {arXiv:2508.17579 [cs]},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {misc}
}
Hans, Soham; Gurney, Nikolos; Marsella, Stacy; Hirschmann, Sofia
Quantifying Loss Aversion in Cyber Adversaries via LLM Analysis Miscellaneous
2025, (arXiv:2508.13240 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, LLM
@misc{hans_quantifying_2025,
title = {Quantifying Loss Aversion in Cyber Adversaries via LLM Analysis},
author = {Soham Hans and Nikolos Gurney and Stacy Marsella and Sofia Hirschmann},
url = {http://arxiv.org/abs/2508.13240},
doi = {10.48550/arXiv.2508.13240},
year  = {2025},
date = {2025-08-01},
urldate = {2025-09-18},
publisher = {arXiv},
abstract = {Understanding and quantifying human cognitive biases from empirical data has long posed a formidable challenge, particularly in cybersecurity, where defending against unknown adversaries is paramount. Traditional cyber defense strategies have largely focused on fortification, while some approaches attempt to anticipate attacker strategies by mapping them to cognitive vulnerabilities, yet they fall short in dynamically interpreting attacks in progress. In recognition of this gap, IARPA's ReSCIND program seeks to infer, defend against, and even exploit attacker cognitive traits. In this paper, we present a novel methodology that leverages large language models (LLMs) to extract quantifiable insights into the cognitive bias of loss aversion from hacker behavior. Our data are collected from an experiment in which hackers were recruited to attack a controlled demonstration network. We process the hacker generated notes using LLMs using it to segment the various actions and correlate the actions to predefined persistence mechanisms used by hackers. By correlating the implementation of these mechanisms with various operational triggers, our analysis provides new insights into how loss aversion manifests in hacker decision-making. The results demonstrate that LLMs can effectively dissect and interpret nuanced behavioral patterns, thereby offering a transformative approach to enhancing cyber defense strategies through real-time, behavior-based analysis.},
note = {arXiv:2508.13240 [cs]},
keywords = {DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
Wang, Zihao; Rodrigues, Patrick Borges; Fang, Yiyang; Soibelman, Lucio; Becerik-Gerber, Burcin; Roll, Shawn C.; Lucas, Gale M
Understanding Potential Challenges in Demolition Robot Teleoperation to Inform Interface Design: Insights from Industry Professionals Journal Article
In: CIB Conferences, vol. 1, no. 1, 2025, ISSN: 3067-4883.
@article{wang_understanding_2025,
title = {Understanding Potential Challenges in Demolition Robot Teleoperation to Inform Interface Design: Insights from Industry Professionals},
author = {Zihao Wang and Patrick Borges Rodrigues and Yiyang Fang and Lucio Soibelman and Burcin Becerik-Gerber and Shawn C. Roll and Gale M Lucas},
url = {https://docs.lib.purdue.edu/cib-conferences/vol1/iss1/106},
doi = {10.7771/3067-4883.2040},
issn = {3067-4883},
year  = {2025},
date = {2025-06-01},
urldate = {2025-08-19},
journal = {CIB Conferences},
volume = {1},
number = {1},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices Journal Article
In: Journal of Choice Modelling, vol. 55, pp. 100549, 2025, ISSN: 17555345.
Links | BibTeX | Tags: DTIC, Social
@article{gurney_exploring_2025,
title = {Exploring the choice landscape: Anchoring and framing effects on search behavior in complex choices},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1755534525000120},
doi = {10.1016/j.jocm.2025.100549},
issn = {17555345},
year  = {2025},
date = {2025-06-01},
urldate = {2025-04-15},
journal = {Journal of Choice Modelling},
volume = {55},
pages = {100549},
keywords = {DTIC, Social},
pubstate = {published},
tppubtype = {article}
}
Klumpe, Stella; Mitchell, Kelsey C.; Cox, Emma; Katz, Jeffrey S.; Lazarowski, Lucia; Deshpande, Gopikrishna; Gratch, Jonathan; Visser, Ewart J. De; Ayaz, Hasan; Li, Xingnan; Franke, Adrian A.; Krueger, Frank
Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions Journal Article
In: PLoS One, vol. 20, no. 6, pp. e0324312, 2025, ISSN: 1932-6203.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Agents, Virtual Humans
@article{klumpe_social_2025,
title = {Social bonding between humans, animals, and robots: Dogs outperform AIBOs, their robotic replicas, as social companions},
author = {Stella Klumpe and Kelsey C. Mitchell and Emma Cox and Jeffrey S. Katz and Lucia Lazarowski and Gopikrishna Deshpande and Jonathan Gratch and Ewart J. De Visser and Hasan Ayaz and Xingnan Li and Adrian A. Franke and Frank Krueger},
editor = {Casey R. Lynch},
url = {https://dx.plos.org/10.1371/journal.pone.0324312},
doi = {10.1371/journal.pone.0324312},
issn = {1932-6203},
year  = {2025},
date = {2025-06-01},
urldate = {2025-06-12},
journal = {PLoS One},
volume = {20},
number = {6},
pages = {e0324312},
abstract = {In the evolving landscape of technology, robots have emerged as social companions, prompting an investigation into social bonding between humans and robots. While human-animal interactions are well-studied, human-robot interactions (HRI) remain comparatively underexplored. Ethorobotics, a field of social robotic engineering based on ecology and ethology, suggests designing companion robots modeled on animal companions, which are simpler to emulate than humans. However, it is unclear whether these robots can match the social companionship provided by their original models. This study examined social bonding between humans and AIBOs, dog-inspired companion robots, compared to real dogs. Nineteen female participants engaged in 12 affiliative interactions with dogs and AIBOs across two counter-balanced, one-month bonding phases. Social bonding was assessed through urinary oxytocin (OXT) level change over an interaction, self-reported attachment using an adapted version of the Lexington Attachment to Pets Scale, and social companionship evaluations administering the Robot-Dog Questionnaire. To examine OXT level changes and self-reported attachment by comparing the two social companions, we conducted mixed-effects model analyses and planned follow-up comparisons. Frequency comparison, binary logistic regression, and thematic analysis were performed to analyze social companionship evaluations. Results revealed significant differences between dogs and AIBOs in fostering social bonds. OXT level change increased during interactions with dogs but decreased with AIBOs. Participants reported stronger attachment to dogs and rated them as better social companions. These findings highlight the current limitations of AIBOs in fostering social bonding immediately compared to dogs. Our study contributes to the growing HRI research by demonstrating an existing gap between AIBOs and dogs as social companions. It highlights the need for further investigation to understand the complexities of social bonding with companion robots, which is essential to implement successful applications for social robots in diverse domains such as the elderly and health care, education, and entertainment.},
keywords = {DTIC, Virtual Agents, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hale, James; Kim, HanMoe; Choi, Ahyoung; Gratch, Jonathan
AI-Mediated Dispute Resolution Journal Article
In: AAAI-SS, vol. 5, no. 1, pp. 67–70, 2025, ISSN: 2994-4317.
Abstract | Links | BibTeX | Tags: AI, DTIC
@article{hale_ai-mediated_2025,
title = {AI-Mediated Dispute Resolution},
author = {James Hale and HanMoe Kim and Ahyoung Choi and Jonathan Gratch},
url = {https://ojs.aaai.org/index.php/AAAI-SS/article/view/35558},
doi = {10.1609/aaaiss.v5i1.35558},
issn = {2994-4317},
year  = {2025},
date = {2025-05-01},
urldate = {2025-08-19},
journal = {AAAI-SS},
volume = {5},
number = {1},
pages = {67–70},
abstract = {We examine the effectiveness of large language model (LLM) mediations in the under-studied dispute resolution domain. We first used a new corpus of dispute resolutions, KODIS, to investigate if LLMs can correctly identify whether to intervene. We find evidence that GPT as a mediator picks up on salient aspects of a dispute, such as Frustration and whether the disputants ultimately come to a resolution or stall at an impasse — intervening significantly more so in cases of high frustration and impasse. Afterward, we ran a user study to compare GPT mediations against those of novice human mediators. We find participants agreed GPT's mediations were more likely to lead to resolution; were better positioned in the dialog; had better justification than human-crafted ones; and, on a forced choice, were generally more effective than novice human mediations.},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {article}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Journal Article
In: Int J Artif Intell Educ, 2025, ISSN: 1560-4292, 1560-4306.
Links | BibTeX | Tags: DTIC, Learning Sciences
@article{okado_how_2025,
title = {How Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
url = {https://link.springer.com/10.1007/s40593-025-00482-w},
doi = {10.1007/s40593-025-00482-w},
issn = {1560-4292, 1560-4306},
year  = {2025},
date = {2025-05-01},
urldate = {2025-06-24},
journal = {Int J Artif Intell Educ},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {article}
}
Core, Mark; Nye, Benjamin; Carr, Kayla; Li, Shirley; Shiel, Aaron; Auerbach, Daniel; Leeds, Andrew; Swartout, William
Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling Journal Article
In: FLAIRS, vol. 38, 2025, ISSN: 2334-0762, 2334-0754.
Abstract | Links | BibTeX | Tags: AI, DTIC
@article{core_usability_2025,
title = {Usability and Preferences for a Personalized Adaptive Learning System for AI Upskilling},
author = {Mark Core and Benjamin Nye and Kayla Carr and Shirley Li and Aaron Shiel and Daniel Auerbach and Andrew Leeds and William Swartout},
url = {https://journals.flvc.org/FLAIRS/article/view/138996},
doi = {10.32473/flairs.38.1.138996},
issn = {2334-0762, 2334-0754},
year  = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {FLAIRS},
volume = {38},
abstract = {As AI tools become common across jobs and industries, it is critical to broaden education about AI beyond teaching computer scientists how to build AI systems. To expand AI education, we are researching AI for AI learning: a personalized and adaptive learning system that integrates dialog-based tutoring and gamified programming activities. To study this problem, we adapted and expanded an existing smartphone adaptive coach to develop the Game-if-AI system. Using a design-based research approach, Game-if-AI was iteratively tested and improved across four semesters of optional use in a course designed for technician-level understanding of AI: mastering programming skills to apply AI libraries and established models. In this study, we measured the interests and needs of these technical learners, based on both survey data and on how they engaged with topics in the system. Based on this data, new topics were added and the system was refined. In this paper, we report students' usability ratings for system components and student preferences based on completion rates of AI topics available each semester. Students rated the adaptive system positively overall (93% rated as a "good idea"), but more complex learning activities (tutoring dialogs, programming) were rated lower than traditional ones (e.g., multiple choice, reading). Students were most likely to master topics highly aligned to the course materials, as well as self-directed learning toward easier high-interest topics (e.g., LLM Prompting).},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Fu, Boxi; Dincer, Betul; Masur, Omkar; Faizi, David; Ravindran, Harshul; Wang, Julia; Lai, Devashish; Merchant, Chirag
Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners Book Section
In: Smith, Brian K.; Borge, Marcela (Ed.): Learning and Collaboration Technologies, vol. 15808, pp. 69–79, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-93745-3 978-3-031-93746-0, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: AI, DTIC
@incollection{smith_becoming_2025,
title = {Becoming Fei: An Educational Game for AI and Data Science Education for Novice Learners},
author = {Ning Wang and Boxi Fu and Betul Dincer and Omkar Masur and David Faizi and Harshul Ravindran and Julia Wang and Devashish Lai and Chirag Merchant},
editor = {Brian K. Smith and Marcela Borge},
url = {https://link.springer.com/10.1007/978-3-031-93746-0_6},
doi = {10.1007/978-3-031-93746-0_6},
isbn = {978-3-031-93745-3 978-3-031-93746-0},
year  = {2025},
date = {2025-05-01},
urldate = {2025-06-12},
booktitle = {Learning and Collaboration Technologies},
volume = {15808},
pages = {69–79},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {incollection}
}
Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration Journal Article
In: Journal of Environmental Psychology, pp. 102628, 2025, ISSN: 02724944.
@article{awada_impact_2025,
title = {The Impact of Color Correlated Temperature and Illuminance Levels of Office Lighting on Stress and Cognitive Restoration},
author = {Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494425001112},
doi = {10.1016/j.jenvp.2025.102628},
issn = {02724944},
year  = {2025},
date = {2025-05-01},
urldate = {2025-05-20},
journal = {Journal of Environmental Psychology},
pages = {102628},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew
Logical Abduction as a Computational Model of Narrative Proceedings Article
In: Geneva, Switzerland, 2025.
Links | BibTeX | Tags: DTIC, Narrative
@inproceedings{gordon_andrew_logical_2025,
title = {Logical Abduction as a Computational Model of Narrative},
author = {Andrew Gordon},
url = {https://asgordon.github.io/publications/CMN2025.PDF},
year  = {2025},
date = {2025-05-01},
address = {Geneva, Switzerland},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaubey, Ashutosh; Guan, Xulang; Soleymani, Mohammad
Face-LLaVA: Facial Expression and Attribute Understanding through Instruction Tuning Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, LLM
@misc{chaubey_face-llava_2025,
title = {Face-LLaVA: Facial Expression and Attribute Understanding through Instruction Tuning},
author = {Ashutosh Chaubey and Xulang Guan and Mohammad Soleymani},
url = {https://arxiv.org/abs/2504.07198},
doi = {10.48550/ARXIV.2504.07198},
year  = {2025},
date = {2025-04-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {The human face plays a central role in social communication, necessitating the use of performant computer vision tools for human-centered applications. We propose Face-LLaVA, a multimodal large language model for face-centered, in-context learning, including facial expression and attribute recognition. Additionally, Face-LLaVA is able to generate natural language descriptions that can be used for reasoning. Leveraging existing visual databases, we first developed FaceInstruct-1M, a face-centered database for instruction tuning MLLMs for face processing. We then developed a novel face-specific visual encoder powered by Face-Region Guided Cross-Attention that integrates face geometry with local visual features. We evaluated the proposed method across nine different datasets and five different face processing tasks, including facial expression recognition, action unit detection, facial attribute detection, age estimation and deepfake detection. Face-LLaVA achieves superior results compared to existing open-source MLLMs and competitive performance compared to commercial solutions. Our model output also receives a higher reasoning rating by GPT under a zero-shot setting across all the tasks. Both our dataset and model will be released at https://face-llava.github.io to support future advancements in social AI and foundational vision-language research.},
note = {Version Number: 1},
keywords = {DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
Hale, James; Rakshit, Sushrita; Chawla, Kushal; Brett, Jeanne M.; Gratch, Jonathan
KODIS: A Multicultural Dispute Resolution Dialogue Corpus Miscellaneous
2025, (arXiv:2504.12723 [cs]).
Abstract | Links | BibTeX | Tags: Dialogue, DTIC
@misc{hale_kodis_2025,
title = {KODIS: A Multicultural Dispute Resolution Dialogue Corpus},
author = {James Hale and Sushrita Rakshit and Kushal Chawla and Jeanne M. Brett and Jonathan Gratch},
url = {http://arxiv.org/abs/2504.12723},
doi = {10.48550/arXiv.2504.12723},
year  = {2025},
date = {2025-04-01},
urldate = {2025-05-20},
publisher = {arXiv},
abstract = {We present KODIS, a dyadic dispute resolution corpus containing thousands of dialogues from over 75 countries. Motivated by a theoretical model of culture and conflict, participants engage in a typical customer service dispute designed by experts to evoke strong emotions and conflict. The corpus contains a rich set of dispositional, process, and outcome measures. The initial analysis supports theories of how anger expressions lead to escalatory spirals and highlights cultural differences in emotional expression. We make this corpus and data collection framework available to the community.},
note = {arXiv:2504.12723 [cs]},
keywords = {Dialogue, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Lin, Spencer; Jun, Miru; Rizk, Basem; Shieh, Karen; Fisher, Scott; Mozgai, Sharon
Optimizing SIA Development: A Case Study in User-Centered Design for Estuary, a Multimodal Socially Interactive Agent Framework Proceedings Article
In: Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–9, 2025, (arXiv:2504.14427 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC
@inproceedings{lin_optimizing_2025,
title = {Optimizing SIA Development: A Case Study in User-Centered Design for Estuary, a Multimodal Socially Interactive Agent Framework},
author = {Spencer Lin and Miru Jun and Basem Rizk and Karen Shieh and Scott Fisher and Sharon Mozgai},
url = {http://arxiv.org/abs/2504.14427},
doi = {10.1145/3706599.3707399},
year  = {2025},
date = {2025-04-01},
urldate = {2025-05-20},
booktitle = {Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1–9},
abstract = {This case study presents our user-centered design model for Socially Intelligent Agent (SIA) development frameworks through our experience developing Estuary, an open source multimodal framework for building low-latency real-time socially interactive agents. We leverage the Rapid Assessment Process (RAP) to collect the thoughts of leading researchers in the field of SIAs regarding the current state of the art for SIA development as well as their evaluation of how well Estuary may potentially address current research gaps. We achieve this through a series of end-user interviews conducted by a fellow researcher in the community. We hope that the findings of our work will not only assist the continued development of Estuary but also guide the development of other future frameworks and technologies for SIAs.},
note = {arXiv:2504.14427 [cs]},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Siniukov, Maksim; Chang, Di; Tran, Minh; Gong, Hongkun; Chaubey, Ashutosh; Soleymani, Mohammad
DiTaiListener: Controllable High Fidelity Listener Video Generation with Diffusion Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, VGL
@misc{siniukov_ditailistener_2025,
title = {DiTaiListener: Controllable High Fidelity Listener Video Generation with Diffusion},
author = {Maksim Siniukov and Di Chang and Minh Tran and Hongkun Gong and Ashutosh Chaubey and Mohammad Soleymani},
url = {https://arxiv.org/abs/2504.04010},
doi = {10.48550/ARXIV.2504.04010},
year  = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {Generating naturalistic and nuanced listener motions for extended interactions remains an open problem. Existing methods often rely on low-dimensional motion codes for facial behavior generation followed by photorealistic rendering, limiting both visual fidelity and expressive richness. To address these challenges, we introduce DiTaiListener, powered by a video diffusion model with multimodal conditions. Our approach first generates short segments of listener responses conditioned on the speaker's speech and facial motions with DiTaiListener-Gen. It then refines the transitional frames via DiTaiListener-Edit for a seamless transition. Specifically, DiTaiListener-Gen adapts a Diffusion Transformer (DiT) for the task of listener head portrait generation by introducing a Causal Temporal Multimodal Adapter (CTM-Adapter) to process speakers' auditory and visual cues. CTM-Adapter integrates speakers' input in a causal manner into the video generation process to ensure temporally coherent listener responses. For long-form video generation, we introduce DiTaiListener-Edit, a transition refinement video-to-video diffusion model. The model fuses video segments into smooth and continuous videos, ensuring temporal consistency in facial expressions and image quality when merging short video segments produced by DiTaiListener-Gen. Quantitatively, DiTaiListener achieves the state-of-the-art performance on benchmark datasets in both photorealism (+73.8% in FID on RealTalk) and motion representation (+6.1% in FD metric on VICO) spaces. User studies confirm the superior performance of DiTaiListener, with the model being the clear preference in terms of feedback, diversity, and smoothness, outperforming competitors by a significant margin.},
note = {Version Number: 1},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {misc}
}
Gurney, Nikolos; Pynadath, David V.; Miller, John H.
Willingness to work as a predictor of human-agent team success Journal Article
In: Front. Comput. Sci., vol. 7, pp. 1405436, 2025, ISSN: 2624-9898.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Agents
@article{gurney_willingness_2025,
title = {Willingness to work as a predictor of human-agent team success},
author = {Nikolos Gurney and David V. Pynadath and John H. Miller},
url = {https://www.frontiersin.org/articles/10.3389/fcomp.2025.1405436/full},
doi = {10.3389/fcomp.2025.1405436},
issn = {2624-9898},
year  = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
journal = {Front. Comput. Sci.},
volume = {7},
pages = {1405436},
abstract = {Research shows that the effectiveness of human-agent teams depends heavily on human team members' prior experiences, whether from direct teaming activities or relevant domain knowledge. While researchers have proposed various mechanisms to explain this relationship, we present a simpler alternative explanation: experience serves primarily as an indicator of a person's fundamental willingness to engage in teaming tasks. We introduce a measure called “willingness to work” that quantifies this underlying disposition. Our empirical analysis demonstrates that this straightforward metric robustly predicts human-agent team performance. Beyond its practical value as a predictive tool, this reconceptualization of the experience-performance relationship necessitates a fresh examination of existing findings in the field. The results suggest that a team member's basic willingness to invest effort may be more fundamental to success than previously recognized mechanisms.},
keywords = {DTIC, Virtual Agents},
pubstate = {published},
tppubtype = {article}
}
Ustun, Volkan; Hans, Soham; Kumar, Rajay; Wang, Yunzhe
Abstracting Geo-specific Terrains to Scale Up Reinforcement Learning Miscellaneous
2025, (arXiv:2503.20078 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Simulation
@misc{ustun_abstracting_2025,
title = {Abstracting Geo-specific Terrains to Scale Up Reinforcement Learning},
author = {Volkan Ustun and Soham Hans and Rajay Kumar and Yunzhe Wang},
url = {http://arxiv.org/abs/2503.20078},
doi = {10.48550/arXiv.2503.20078},
year  = {2025},
date = {2025-03-01},
urldate = {2025-04-15},
publisher = {arXiv},
abstract = {Multi-agent reinforcement learning (MARL) is increasingly ubiquitous in training dynamic and adaptive synthetic characters for interactive simulations on geo-specific terrains. Frameworks such as Unity's ML-Agents help to make such reinforcement learning experiments more accessible to the simulation community. Military training simulations also benefit from advances in MARL, but they have immense computational requirements due to their complex, continuous, stochastic, partially observable, non-stationary, and doctrine-based nature. Furthermore, these simulations require geo-specific terrains, further exacerbating the computational resources problem. In our research, we leverage Unity's waypoints to automatically generate multi-layered representation abstractions of the geo-specific terrains to scale up reinforcement learning while still allowing the transfer of learned policies between different representations. Our early exploratory results on a novel MARL scenario, where each side has differing objectives, indicate that waypoint-based navigation enables faster and more efficient learning while producing trajectories similar to those taken by expert human players in CSGO gaming environments. This research points out the potential of waypoint-based navigation for reducing the computational costs of developing and training MARL models for military training simulations, where geo-specific terrains and differing objectives are crucial.},
note = {arXiv:2503.20078 [cs]},
keywords = {DTIC, Simulation},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Becerik-Gerber, Burcin; Pynadath, David V.; Marti, Deniz; Lucas, Gale M.
Elicitation and verification of learning via experts (EVOLVE) for creating a theoretical framework for active shooter incidents Journal Article
In: Developments in the Built Environment, vol. 21, pp. 100635, 2025, ISSN: 26661659.
Links | BibTeX | Tags: DTIC, Social Simulation
@article{liu_elicitation_2025,
title = {Elicitation and verification of learning via experts (EVOLVE) for creating a theoretical framework for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and David V. Pynadath and Deniz Marti and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2666165925000353},
doi = {10.1016/j.dibe.2025.100635},
issn = {26661659},
year  = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
journal = {Developments in the Built Environment},
volume = {21},
pages = {100635},
keywords = {DTIC, Social Simulation},
pubstate = {published},
tppubtype = {article}
}
Jalal-Kamali, Ali; Gurney, Nikolos; Pynadath, David
Predicting Team Performance from Communications in Simulated Search-and-Rescue Miscellaneous
2025, (arXiv:2503.03791 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC
@misc{jalal-kamali_predicting_2025,
title = {Predicting Team Performance from Communications in Simulated Search-and-Rescue},
author = {Ali Jalal-Kamali and Nikolos Gurney and David Pynadath},
url = {http://arxiv.org/abs/2503.03791},
doi = {10.48550/arXiv.2503.03791},
year  = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {Understanding how individual traits influence team performance is valuable, but these traits are not always directly observable. Prior research has inferred traits like trust from behavioral data. We analyze conversational data to identify team traits and their correlation with teaming outcomes. Using transcripts from a Minecraft-based search-and-rescue experiment, we apply topic modeling and clustering to uncover key interaction patterns. Our findings show that variations in teaming outcomes can be explained through these inferences, with different levels of predictive power derived from individual traits and team dynamics.},
note = {arXiv:2503.03791 [cs]},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Kwon, Deuksin; Hae, Jiwon; Clift, Emma; Shamsoddini, Daniel; Gratch, Jonathan; Lucas, Gale M.
ASTRA: A Negotiation Agent with Adaptive and Strategic Reasoning through Action in Dynamic Offer Optimization Miscellaneous
2025, (arXiv:2503.07129 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Virtual Agents
@misc{kwon_astra_2025,
title = {ASTRA: A Negotiation Agent with Adaptive and Strategic Reasoning through Action in Dynamic Offer Optimization},
author = {Deuksin Kwon and Jiwon Hae and Emma Clift and Daniel Shamsoddini and Jonathan Gratch and Gale M. Lucas},
url = {http://arxiv.org/abs/2503.07129},
doi = {10.48550/arXiv.2503.07129},
year  = {2025},
date = {2025-03-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {Negotiation requires dynamically balancing self-interest and cooperation to maximize one's own utility. Yet, existing agents struggle due to bounded rationality in human data, low adaptability to counterpart behavior, and limited strategic reasoning. To address this, we introduce principle-driven negotiation agents, powered by ASTRA, a novel framework for turn-level offer optimization grounded in two core principles: opponent modeling and Tit-for-Tat reciprocity. ASTRA operates in three stages: (1) interpreting counterpart behavior, (2) optimizing counteroffers via a linear programming (LP) solver, and (3) selecting offers based on negotiation tactics and the partner's acceptance probability. Through simulations and human evaluations, our agent effectively adapts to an opponent's shifting stance and achieves favorable outcomes through enhanced adaptability and strategic reasoning. Beyond improving negotiation performance, it also serves as a powerful coaching tool, offering interpretable strategic feedback and optimal offer recommendations.},
note = {arXiv:2503.07129 [cs]},
keywords = {DTIC, Virtual Agents},
pubstate = {published},
tppubtype = {misc}
}
Fonseca, Henrique Correia Da; Melo, Celso M. De; Terada, Kazunori; Gratch, Jonathan; Paiva, Ana S.; Santos, Francisco C.
Evolution of indirect reciprocity under emotion expression Journal Article
In: Sci Rep, vol. 15, no. 1, pp. 9151, 2025, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: DTIC
@article{correia_da_fonseca_evolution_2025,
title = {Evolution of indirect reciprocity under emotion expression},
author = {Henrique Correia Da Fonseca and Celso M. De Melo and Kazunori Terada and Jonathan Gratch and Ana S. Paiva and Francisco C. Santos},
url = {https://www.nature.com/articles/s41598-025-89588-8},
doi = {10.1038/s41598-025-89588-8},
issn = {2045-2322},
year  = {2025},
date = {2025-03-01},
urldate = {2025-03-20},
journal = {Sci Rep},
volume = {15},
number = {1},
pages = {9151},
abstract = {Do emotion expressions impact the evolution of cooperation? Indirect Reciprocity offers a solution to the cooperation dilemma with prior work focusing on the role of social norms in propagating others’ reputations and contributing to evolutionarily stable cooperation. Recent experimental studies, however, show that emotion expressions shape pro-social behaviour, communicate one’s intentions to others, and serve an error-correcting function; yet, the role of emotion signals in the evolution of cooperation remains unexplored. We present the first model of IR based on evolutionary game theory that exposes how emotion expressions positively influence the evolution of cooperation, particularly in scenarios of frequent errors. Our findings provide evolutionary support for the existence of emotion-based social norms, which help foster cooperation among unrelated individuals.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Becerik-Gerber, Burçin; Lucas, Gale M.
Investigating Role of Personal Factors in Shaping Responses to Active Shooter Incident using Machine Learning Miscellaneous
2025, (arXiv:2503.05719 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, VR
@misc{liu_investigating_2025,
title = {Investigating Role of Personal Factors in Shaping Responses to Active Shooter Incident using Machine Learning},
author = {Ruying Liu and Burçin Becerik-Gerber and Gale M. Lucas},
url = {http://arxiv.org/abs/2503.05719},
doi = {10.48550/arXiv.2503.05719},
year  = {2025},
date = {2025-02-01},
urldate = {2025-03-18},
publisher = {arXiv},
abstract = {This study bridges the knowledge gap on how personal factors affect building occupants' responses in active shooter situations by applying interpretable machine learning methods to data from 107 participants. The personal factors studied are training methods, prior training experience, sense of direction, and gender. The response performance measurements consist of decisions (run, hide, multiple), vulnerability (corresponding to the time a participant is visible to a shooter), and pre-evacuation time. The results indicate that the propensity to run significantly determines overall response strategies, overshadowing vulnerability, and pre-evacuation time. The training method is a critical factor where VR-based training leads to better responses than video-based training. A better sense of direction and previous training experience are correlated with a greater propensity to run and less vulnerability. Gender slightly influences decisions and vulnerability but significantly impacts pre-evacuation time, with females evacuating slower, potentially due to higher risk perception. This study underscores the importance of personal factors in shaping responses to active shooter incidents.},
note = {arXiv:2503.05719 [cs]},
keywords = {DTIC, Social Simulation, VR},
pubstate = {published},
tppubtype = {misc}
}
Tak, Ala N.; Banayeeanzade, Amin; Bolourani, Anahita; Kian, Mina; Jia, Robin; Gratch, Jonathan
Mechanistic Interpretability of Emotion Inference in Large Language Models Miscellaneous
2025, (arXiv:2502.05489 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, LLM
@misc{tak_mechanistic_2025,
title = {Mechanistic Interpretability of Emotion Inference in Large Language Models},
author = {Ala N. Tak and Amin Banayeeanzade and Anahita Bolourani and Mina Kian and Robin Jia and Jonathan Gratch},
url = {http://arxiv.org/abs/2502.05489},
doi = {10.48550/arXiv.2502.05489},
year  = {2025},
date = {2025-02-01},
urldate = {2025-02-20},
publisher = {arXiv},
abstract = {Large language models (LLMs) show promising capabilities in predicting human emotions from text. However, the mechanisms through which these models process emotional stimuli remain largely unexplored. Our study addresses this gap by investigating how autoregressive LLMs infer emotions, showing that emotion representations are functionally localized to specific regions in the model. Our evaluation includes diverse model families and sizes and is supported by robustness checks. We then show that the identified representations are psychologically plausible by drawing on cognitive appraisal theory, a well-established psychological framework positing that emotions emerge from evaluations (appraisals) of environmental stimuli. By causally intervening on construed appraisal concepts, we steer the generation and show that the outputs align with theoretical and intuitive expectations. This work highlights a novel way to causally intervene and precisely shape emotional text generation, potentially benefiting safety and alignment in sensitive affective domains.},
note = {arXiv:2502.05489 [cs]},
keywords = {DTIC, LLM},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.; Busta, Kelly
Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities Journal Article
In: International Journal of Disaster Risk Reduction, vol. 118, pp. 105225, 2025, ISSN: 22124209.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{liu_impact_2025,
title = {Impact of behavior-based virtual training on active shooter incident preparedness in healthcare facilities},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas and Kelly Busta},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2212420925000494},
doi = {10.1016/j.ijdrr.2025.105225},
issn = {22124209},
year  = {2025},
date = {2025-02-01},
urldate = {2025-02-20},
journal = {International Journal of Disaster Risk Reduction},
volume = {118},
pages = {105225},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Terada, Kazunori; Melo, Celso De; Santos, Francisco C.; Gratch, Jonathan
A Bayesian Model of Mind Reading from Decisions and Emotions in Social Dilemmas Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 47, 2025.
Links | BibTeX | Tags: DTIC, Emotions
@article{terada_bayesian_2025,
title = {A Bayesian Model of Mind Reading from Decisions and Emotions in Social Dilemmas},
author = {Kazunori Terada and Celso De Melo and Francisco C. Santos and Jonathan Gratch},
url = {https://escholarship.org/uc/item/12f7f7f8#main},
year  = {2025},
date = {2025-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {47},
keywords = {DTIC, Emotions},
pubstate = {published},
tppubtype = {article}
}
Walsh, Joel; Mamidanna, Siddarth; Nye, Benjamin; Core, Mark; Auerbach, Daniel
Fine-tuning for Better Few Shot Prompting: An Empirical Comparison for Short Answer Grading Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning
@misc{walsh_fine-tuning_2025,
title = {Fine-tuning for Better Few Shot Prompting: An Empirical Comparison for Short Answer Grading},
author = {Joel Walsh and Siddarth Mamidanna and Benjamin Nye and Mark Core and Daniel Auerbach},
url = {https://arxiv.org/abs/2508.04063},
doi = {10.48550/ARXIV.2508.04063},
year  = {2025},
date = {2025-01-01},
urldate = {2025-08-19},
publisher = {arXiv},
abstract = {Research to improve Automated Short Answer Grading has recently focused on Large Language Models (LLMs) with prompt engineering and no- or few-shot prompting to achieve best results. This is in contrast to the fine-tuning approach, which has historically required large-scale compute clusters inaccessible to most users. New closed-model approaches such as OpenAI's fine-tuning service promise results with as few as 100 examples, while methods using open weights such as quantized low-rank adaptive (QLORA) can be used to fine-tune models on consumer GPUs. We evaluate both of these fine-tuning methods, measuring their interaction with few-shot prompting for automated short answer grading (ASAG) with structured (JSON) outputs. Our results show that finetuning with small amounts of data has limited utility for Llama open-weight models, but that fine-tuning methods can outperform few-shot baseline instruction-tuned LLMs for OpenAI's closed models. While our evaluation set is limited, we find some evidence that the observed benefits of finetuning may be impacted by the domain subject matter. Lastly, we observed dramatic improvement with the LLama 3.1 8B-Instruct open-weight model by seeding the initial training examples with a significant amount of cheaply generated synthetic training data.},
note = {Version Number: 1},
keywords = {DTIC, Machine Learning},
pubstate = {published},
tppubtype = {misc}
}
Behzad, Tina; Gurney, Nikolos; Wang, Ning; Pynadath, David V.
Beyond Predictions: A Study of AI Strength and Weakness Transparency Communication on Human-AI Collaboration Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: AI, DTIC
@misc{behzad_beyond_2025,
title = {Beyond Predictions: A Study of AI Strength and Weakness Transparency Communication on Human-AI Collaboration},
author = {Tina Behzad and Nikolos Gurney and Ning Wang and David V. Pynadath},
url = {https://arxiv.org/abs/2508.09033},
doi = {10.48550/ARXIV.2508.09033},
year  = {2025},
date = {2025-01-01},
urldate = {2025-08-19},
publisher = {arXiv},
abstract = {The promise of human-AI teaming lies in humans and AI working together to achieve performance levels neither could accomplish alone. Effective communication between AI and humans is crucial for teamwork, enabling users to efficiently benefit from AI assistance. This paper investigates how AI communication impacts human-AI team performance. We examine AI explanations that convey an awareness of its strengths and limitations. To achieve this, we train a decision tree on the model's mistakes, allowing it to recognize and explain where and why it might err. Through a user study on an income prediction task, we assess the impact of varying levels of information and explanations about AI predictions. Our results show that AI performance insights enhance task performance, and conveying AI awareness of its strengths and weaknesses improves trust calibration. These findings highlight the importance of considering how information delivery influences user trust and reliance in AI-assisted decision-making.},
note = {Version Number: 1},
keywords = {AI, DTIC},
pubstate = {published},
tppubtype = {misc}
}
Rizzo, Albert; Mozgai, Sharon; Sigaras, Alexandros; Rubin, John E.; Jotwani, Rohan
Expert Consensus Best Practices for the Safe, Ethical, and Effective Design and Implementation of Artificially Intelligent Conversational Agent (i.e., Chatbot/Virtual Human) Systems in Health Care Applications Journal Article
In: Journal of Medical Extended Reality, vol. 2, no. 1, pp. 209–222, 2025, (_eprint: https://www.liebertpub.com/doi/pdf/10.1177/29941520251369450).
Abstract | Links | BibTeX | Tags: DTIC, MedVR
@article{rizzo_expert_2025,
title = {Expert Consensus Best Practices for the Safe, Ethical, and Effective Design and Implementation of Artificially Intelligent Conversational Agent (i.e., Chatbot/Virtual Human) Systems in Health Care Applications},
author = {Albert Rizzo and Sharon Mozgai and Alexandros Sigaras and John E. Rubin and Rohan Jotwani},
url = {https://www.liebertpub.com/doi/abs/10.1177/29941520251369450},
doi = {10.1177/29941520251369450},
year  = {2025},
date = {2025-01-01},
journal = {Journal of Medical Extended Reality},
volume = {2},
number = {1},
pages = {209–222},
abstract = {The integration of artificially intelligent conversational agents (AICAs), variously referred to as chatbots and virtual humans (VHs), is transforming health care delivery and education. This article explores our perspective on best practices for the evolution, potential, and ethical considerations of AICAs in clinical and educational contexts. Early applications of simulation technology in health care focused on productivity improvements, teletherapy, and virtual reality therapy applications. Recent technological advancements have enabled the development of high-fidelity extended reality systems and AICAs capable of engaging users in credible interactions. These systems leverage natural language processing, machine learning, large language models, and advanced VH authoring software to create interactive, personalized, and engaging experiences. Recent efforts in the creation of AICAs suggest significant potential benefits, including enhanced patient engagement, improved access to self-care resources, and low-stigma interaction environments. They have demonstrated promise in mental health support, providing a sense of safety and encouraging open disclosure. However, the rapid adoption of AICAs raises critical challenges, including safeguarding user privacy, ensuring system reliability, and addressing ethical concerns. Incidents of harm, such as inappropriate interactions and psychological distress, highlight the need for rigorous design and implementation best practices. This article outlines key principles for developing safe, effective, and equitable AICAs, emphasizing transparency in artificial intelligence (AI) identity, accountability, cultural sensitivity, and informed consent. Additionally, the authors advocate for robust privacy measures, adaptive learning capabilities, and evidence-based content validation to optimize user experience and maintain trust. To mitigate risks, a “human-in-the-loop” approach is recommended, ensuring health care professionals oversee AI-supported decisions. By adhering to these best practices, AICAs can enhance health care accessibility, support clinical training, and complement human professionals. This work aims to provide a foundation for the ethical and effective integration of AICAs, maximizing their potential while minimizing risks, ultimately advancing patient care and education in the digital age.},
note = {_eprint: https://www.liebertpub.com/doi/pdf/10.1177/29941520251369450},
keywords = {DTIC, MedVR},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
SetPeER: Set-Based Personalized Emotion Recognition With Weak Supervision Journal Article
In: IEEE Trans. Affective Comput., pp. 1–15, 2025, ISSN: 1949-3045, 2371-9850.
Links | BibTeX | Tags: DTIC, Emotion
@article{tran_setpeer_2025,
title = {SetPeER: Set-Based Personalized Emotion Recognition With Weak Supervision},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10993348/},
doi = {10.1109/TAFFC.2025.3568024},
issn = {1949-3045, 2371-9850},
year  = {2025},
date = {2025-01-01},
urldate = {2025-05-20},
journal = {IEEE Trans. Affective Comput.},
pages = {1–15},
keywords = {DTIC, Emotion},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Hurt, Timothy; Krakowski, Ari; Greenwald, Eric; Hammerman, Jim; Santos, Sabrina De Los; Masur, Omkar; Fu, Boxi; Merchant, Chirag
Virtually Human: An Exhibit for Public AI Education Book Section
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2025 Posters, vol. 2529, pp. 436–443, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-94170-2 978-3-031-94171-9, (Series Title: Communications in Computer and Information Science).
@incollection{stephanidis_virtually_2025,
title = {Virtually Human: An Exhibit for Public AI Education},
author = {Ning Wang and Timothy Hurt and Ari Krakowski and Eric Greenwald and Jim Hammerman and Sabrina De Los Santos and Omkar Masur and Boxi Fu and Chirag Merchant},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/10.1007/978-3-031-94171-9_42},
doi = {10.1007/978-3-031-94171-9_42},
isbn = {978-3-031-94170-2 978-3-031-94171-9},
year  = {2025},
date = {2025-01-01},
urldate = {2025-06-17},
booktitle = {HCI International 2025 Posters},
volume = {2529},
pages = {436–443},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {DTIC},
pubstate = {published},
tppubtype = {incollection}
}
Liu, Rong; Sun, Dylan; Chen, Meida; Wang, Yue; Feng, Andrew
Deformable Beta Splatting Miscellaneous
2025, (arXiv:2501.18630 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, Narrative
@misc{liu_deformable_2025,
title = {Deformable Beta Splatting},
author = {Rong Liu and Dylan Sun and Meida Chen and Yue Wang and Andrew Feng},
url = {http://arxiv.org/abs/2501.18630},
doi = {10.48550/arXiv.2501.18630},
year  = {2025},
date = {2025-01-01},
urldate = {2025-02-20},
publisher = {arXiv},
abstract = {3D Gaussian Splatting (3DGS) has advanced radiance field reconstruction by enabling real-time rendering. However, its reliance on Gaussian kernels for geometry and low-order Spherical Harmonics (SH) for color encoding limits its ability to capture complex geometries and diverse colors. We introduce Deformable Beta Splatting (DBS), a deformable and compact approach that enhances both geometry and color representation. DBS replaces Gaussian kernels with deformable Beta Kernels, which offer bounded support and adaptive frequency control to capture fine geometric details with higher fidelity while achieving better memory efficiency. In addition, we extended the Beta Kernel to color encoding, which facilitates improved representation of diffuse and specular components, yielding superior results compared to SH-based methods. Furthermore, Unlike prior densification techniques that depend on Gaussian properties, we mathematically prove that adjusting regularized opacity alone ensures distribution-preserved Markov chain Monte Carlo (MCMC), independent of the splatting kernel type. Experimental results demonstrate that DBS achieves state-of-the-art visual quality while utilizing only 45% of the parameters and rendering 1.5x faster than 3DGS-based methods. Notably, for the first time, splatting-based methods outperform state-of-the-art Neural Radiance Fields, highlighting the superior performance and efficiency of DBS for real-time radiance field rendering.},
note = {arXiv:2501.18630 [cs]},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {misc}
}
Chang, Di; Xu, Hongyi; Xie, You; Gao, Yipeng; Kuang, Zhengfei; Cai, Shengqu; Zhang, Chenxu; Song, Guoxian; Wang, Chao; Shi, Yichun; Chen, Zeyuan; Zhou, Shijie; Luo, Linjie; Wetzstein, Gordon; Soleymani, Mohammad
X-Dyna: Expressive Dynamic Human Image Animation Miscellaneous
2025, (arXiv:2501.10021 [cs]).
Abstract | Links | BibTeX | Tags: DTIC, VGL
@misc{chang_x-dyna_2025,
title = {X-Dyna: Expressive Dynamic Human Image Animation},
author = {Di Chang and Hongyi Xu and You Xie and Yipeng Gao and Zhengfei Kuang and Shengqu Cai and Chenxu Zhang and Guoxian Song and Chao Wang and Yichun Shi and Zeyuan Chen and Shijie Zhou and Linjie Luo and Gordon Wetzstein and Mohammad Soleymani},
url = {http://arxiv.org/abs/2501.10021},
doi = {10.48550/arXiv.2501.10021},
year  = {2025},
date = {2025-01-01},
urldate = {2025-02-20},
publisher = {arXiv},
abstract = {We introduce X-Dyna, a novel zero-shot, diffusion-based pipeline for animating a single human image using facial expressions and body movements derived from a driving video, that generates realistic, context-aware dynamics for both the subject and the surrounding environment. Building on prior approaches centered on human pose control, X-Dyna addresses key shortcomings causing the loss of dynamic details, enhancing the lifelike qualities of human video animations. At the core of our approach is the Dynamics-Adapter, a lightweight module that effectively integrates reference appearance context into the spatial attentions of the diffusion backbone while preserving the capacity of motion modules in synthesizing fluid and intricate dynamic details. Beyond body pose control, we connect a local control module with our model to capture identity-disentangled facial expressions, facilitating accurate expression transfer for enhanced realism in animated scenes. Together, these components form a unified framework capable of learning physical human motion and natural scene dynamics from a diverse blend of human and scene videos. Comprehensive qualitative and quantitative evaluations demonstrate that X-Dyna outperforms state-of-the-art methods, creating highly lifelike and expressive animations. The code is available at https://github.com/bytedance/X-Dyna.},
note = {arXiv:2501.10021 [cs]},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {misc}
}
Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Lucas, Gale M.; Roll, Shawn C.
Impact of selective environmental sound attenuation on operator performance, stress, attention, and task engagement in teleoperated demolition Journal Article
In: Automation in Construction, vol. 169, pp. 105876, 2025, ISSN: 09265805.
@article{rodrigues_impact_2025,
title = {Impact of selective environmental sound attenuation on operator performance, stress, attention, and task engagement in teleoperated demolition},
author = {Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0926580524006125},
doi = {10.1016/j.autcon.2024.105876},
issn = {09265805},
year  = {2025},
date = {2025-01-01},
urldate = {2024-12-20},
journal = {Automation in Construction},
volume = {169},
pages = {105876},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
Siniukov, Maksim; Xing, Ellie; Attaripour Isfahani, Sanaz; Soleymani, Mohammad
Towards a Generalizable Speech Marker for Parkinson's Disease Diagnosis Miscellaneous
2025, (Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC
@misc{siniukov_towards_2025,
title = {Towards a Generalizable Speech Marker for Parkinson's Disease Diagnosis},
author = {Maksim Siniukov and Ellie Xing and Sanaz Attaripour Isfahani and Mohammad Soleymani},
url = {https://arxiv.org/abs/2501.03581},
doi = {10.48550/ARXIV.2501.03581},
year  = {2025},
date = {2025-01-01},
urldate = {2025-01-14},
publisher = {arXiv},
abstract = {Parkinson's Disease (PD) is a neurodegenerative disorder characterized by motor symptoms, including altered voice production in the early stages. Early diagnosis is crucial not only to improve PD patients' quality of life but also to enhance the efficacy of potential disease-modifying therapies during early neurodegeneration, a window often missed by current diagnostic tools. In this paper, we propose a more generalizable approach to PD recognition through domain adaptation and self-supervised learning. We demonstrate the generalization capabilities of the proposed approach across diverse datasets in different languages. Our approach leverages HuBERT, a large deep neural network originally trained for speech recognition and further trains it on unlabeled speech data from a population that is similar to the target group, i.e., the elderly, in a self-supervised manner. The model is then fine-tuned and adapted for use across different datasets in multiple languages, including English, Italian, and Spanish. Evaluations on four publicly available PD datasets demonstrate the model's efficacy, achieving an average specificity of 92.1% and an average sensitivity of 91.2%. This method offers objective and consistent evaluations across large populations, addressing the variability inherent in human assessments and providing a non-invasive, cost-effective and accessible diagnostic option.},
note = {Version Number: 1},
keywords = {DTIC},
pubstate = {published},
tppubtype = {misc}
}
2024
Tran, Minh; Chang, Di; Siniukov, Maksim; Soleymani, Mohammad
DIM: Dyadic Interaction Modeling for Social Behavior Generation Book Section
In: Leonardis, Aleš; Ricci, Elisa; Roth, Stefan; Russakovsky, Olga; Sattler, Torsten; Varol, Gül (Ed.): Computer Vision – ECCV 2024, vol. 15095, pp. 484–503, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-72912-6 978-3-031-72913-3, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: DTIC, Social
@incollection{leonardis_dim_2024,
title = {DIM: Dyadic Interaction Modeling for Social Behavior Generation},
author = {Minh Tran and Di Chang and Maksim Siniukov and Mohammad Soleymani},
editor = {Aleš Leonardis and Elisa Ricci and Stefan Roth and Olga Russakovsky and Torsten Sattler and Gül Varol},
url = {https://link.springer.com/10.1007/978-3-031-72913-3_27},
doi = {10.1007/978-3-031-72913-3_27},
isbn = {978-3-031-72912-6 978-3-031-72913-3},
year  = {2024},
date = {2024-12-01},
urldate = {2025-01-16},
booktitle = {Computer Vision – ECCV 2024},
volume = {15095},
pages = {484–503},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {DTIC, Social},
pubstate = {published},
tppubtype = {incollection}
}
Xu, Jiuyi; Chen, Meida; Feng, Andrew; Yu, Zifan; Shi, Yangming
Open-Vocabulary High-Resolution 3D (OVHR3D) Data Segmentation and Annotation Framework Journal Article
In: 2024, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: DTIC, Narrative
@article{xu_open-vocabulary_2024,
title = {Open-Vocabulary High-Resolution 3D (OVHR3D) Data Segmentation and Annotation Framework},
author = {Jiuyi Xu and Meida Chen and Andrew Feng and Zifan Yu and Yangming Shi},
url = {https://arxiv.org/abs/2412.06268},
doi = {10.48550/ARXIV.2412.06268},
year  = {2024},
date = {2024-12-01},
urldate = {2024-12-20},
abstract = {In the domain of the U.S. Army modeling and simulation, the availability of high quality annotated 3D data is pivotal to creating virtual environments for training and simulations. Traditional methodologies for 3D semantic and instance segmentation, such as KpConv, RandLA, Mask3D, etc., are designed to train on extensive labeled datasets to obtain satisfactory performance in practical tasks. This requirement presents a significant challenge, given the inherent scarcity of manually annotated 3D datasets, particularly for the military use cases. Recognizing this gap, our previous research leverages the One World Terrain data repository manually annotated databases, as showcased at IITSEC 2019 and 2021, to enrich the training dataset for deep learning models. However, collecting and annotating large scale 3D data for specific tasks remains costly and inefficient. To this end, the objective of this research is to design and develop a comprehensive and efficient framework for 3D segmentation tasks to assist in 3D data annotation. This framework integrates Grounding DINO and Segment anything Model, augmented by an enhancement in 2D image rendering via 3D mesh. Furthermore, the authors have also developed a user friendly interface that facilitates the 3D annotation process, offering intuitive visualization of rendered images and the 3D point cloud.},
note = {Publisher: arXiv, Version Number: 2},
keywords = {DTIC, Narrative},
pubstate = {published},
tppubtype = {article}
}
Roemmele, Melissa; Gordon, Andrew
From Test-Taking to Test-Making: Examining LLM Authoring of Commonsense Assessment Items Proceedings Article
In: Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 5193–5203, Association for Computational Linguistics, Miami, Florida, USA, 2024.
Links | BibTeX | Tags: DTIC, Learning Sciences
@inproceedings{roemmele_test-taking_2024,
title = {From Test-Taking to Test-Making: Examining LLM Authoring of Commonsense Assessment Items},
author = {Melissa Roemmele and Andrew Gordon},
url = {https://aclanthology.org/2024.findings-emnlp.299},
doi = {10.18653/v1/2024.findings-emnlp.299},
year  = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024},
pages = {5193–5203},
publisher = {Association for Computational Linguistics},
address = {Miami, Florida, USA},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Loucks, Laura; Rizzo, Albert; Rothbaum, Barbara O.
Virtual Reality Exposure for Treating PTSD Due to Military Sexual Trauma Journal Article
In: J Clin Psychol, pp. jclp.23750, 2024, ISSN: 0021-9762, 1097-4679.
Abstract | Links | BibTeX | Tags: DTIC, MedVR
@article{loucks_virtual_2024,
title = {Virtual Reality Exposure for Treating PTSD Due to Military Sexual Trauma},
author = {Laura Loucks and Albert Rizzo and Barbara O. Rothbaum},
url = {https://onlinelibrary.wiley.com/doi/10.1002/jclp.23750},
doi = {10.1002/jclp.23750},
issn = {0021-9762, 1097-4679},
year  = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {J Clin Psychol},
pages = {jclp.23750},
abstract = {Virtual reality exposure therapy (VRE) has been used in the treatment of combat‐related PTSD since the late 1990s and was recently adapted to treat PTSD due to military sexual trauma (MST). With content specifically tailored to MST‐related contexts, we present the case study of a military veteran who participated in the open clinical trial examining the feasibility of VRE in the treatment of MST‐related PTSD (Loucks et al. 2019). We illustrate VRE's use in activating the trauma memory to facilitate therapeutic emotional processing across sessions and overall symptom reduction. The case study includes common challenges that may occur during VRE and relevant recommendations. The discussion will include lessons learned from the case study and the open clinical trial, recommendations for the flexible application of VRE, and the ongoing developments in the latest version of the VRE system, informed by feedback acquired from the clinicians and patients who experienced it in the initial clinical trial.},
keywords = {DTIC, MedVR},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Han, Kangle; Yu, Zifan; Feng, Andrew; Hou, Yu; You, Suya; Soibelman, Lucio
An Aerial Photogrammetry Benchmark Dataset for Point Cloud Segmentation and Style Translation Journal Article
In: Remote Sensing, vol. 16, no. 22, pp. 4240, 2024, ISSN: 2072-4292.
Abstract | Links | BibTeX | Tags: DTIC, VGL
@article{chen_aerial_2024,
title = {An Aerial Photogrammetry Benchmark Dataset for Point Cloud Segmentation and Style Translation},
author = {Meida Chen and Kangle Han and Zifan Yu and Andrew Feng and Yu Hou and Suya You and Lucio Soibelman},
url = {https://www.mdpi.com/2072-4292/16/22/4240},
doi = {10.3390/rs16224240},
issn = {2072-4292},
year  = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {Remote Sensing},
volume = {16},
number = {22},
pages = {4240},
abstract = {The recent surge in diverse 3D datasets spanning various scales and applications marks a significant advancement in the field. However, the comprehensive process of data acquisition, refinement, and annotation at a large scale poses a formidable challenge, particularly for individual researchers and small teams. To this end, we present a novel synthetic 3D point cloud generation framework that can produce detailed outdoor aerial photogrammetric 3D datasets with accurate ground truth annotations without the labor-intensive and time-consuming data collection/annotation processes. Our pipeline procedurally generates synthetic environments, mirroring real-world data collection and 3D reconstruction processes. A key feature of our framework is its ability to replicate consistent quality, noise patterns, and diversity similar to real-world datasets. This is achieved by adopting UAV flight patterns that resemble those used in real-world data collection processes (e.g., the cross-hatch flight pattern) across various synthetic terrains that are procedurally generated, thereby ensuring data consistency akin to real-world scenarios. Moreover, the generated datasets are enriched with precise semantic and instance annotations, eliminating the need for manual labeling. Our approach has led to the development and release of the Semantic Terrain Points Labeling—Synthetic 3D (STPLS3D) benchmark, an extensive outdoor 3D dataset encompassing over 16 km2, featuring up to 19 semantic labels. We also collected, reconstructed, and annotated four real-world datasets for validation purposes. Extensive experiments on these datasets demonstrate our synthetic datasets’ effectiveness, superior quality, and their value as a benchmark dataset for further point cloud research.},
keywords = {DTIC, VGL},
pubstate = {published},
tppubtype = {article}
}
Bonial, Claire; Lukin, Stephanie M.; Abrams, Mitchell; Baker, Anthony; Donatelli, Lucia; Foots, Ashley; Hayes, Cory J.; Henry, Cassidy; Hudson, Taylor; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human–robot dialogue annotation for multi-modal common ground Journal Article
In: Lang Resources & Evaluation, 2024, ISSN: 1574-020X, 1574-0218.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{bonial_humanrobot_2024,
title = {Human–robot dialogue annotation for multi-modal common ground},
author = {Claire Bonial and Stephanie M. Lukin and Mitchell Abrams and Anthony Baker and Lucia Donatelli and Ashley Foots and Cory J. Hayes and Cassidy Henry and Taylor Hudson and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {https://link.springer.com/10.1007/s10579-024-09784-2},
doi = {10.1007/s10579-024-09784-2},
issn = {1574-020X, 1574-0218},
year  = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {Lang Resources & Evaluation},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Marti, Deniz; Budathoki, Anjila; Ding, Yi; Lucas, Gale; Nelson, David
How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations? Journal Article
In: International Journal of Human–Computer Interaction, pp. 1–12, 2024, ISSN: 1044-7318, 1532-7590.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{marti_how_2024,
title = {How Does Acknowledging Users’ Preferences Impact AI’s Ability to Make Conflicting Recommendations?},
author = {Deniz Marti and Anjila Budathoki and Yi Ding and Gale Lucas and David Nelson},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2024.2426035},
doi = {10.1080/10447318.2024.2426035},
issn = {1044-7318, 1532-7590},
year  = {2024},
date = {2024-11-01},
urldate = {2024-12-05},
journal = {International Journal of Human–Computer Interaction},
pages = {1–12},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}