Publications
Gurney, Nikolos; Loewenstein, George; Chater, Nick
Conversational technology and reactions to withheld information Journal Article
In: PLoS ONE, vol. 19, no. 4, pp. e0301382, 2024, ISSN: 1932-6203.
@article{gurney_conversational_2024,
title = {Conversational technology and reactions to withheld information},
author = {Nikolos Gurney and George Loewenstein and Nick Chater},
editor = {Petre Caraiani},
url = {https://dx.plos.org/10.1371/journal.pone.0301382},
doi = {10.1371/journal.pone.0301382},
issn = {1932-6203},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-16},
journal = {PLoS ONE},
volume = {19},
number = {4},
pages = {e0301382},
abstract = {People frequently face decisions that require making inferences about withheld information. The advent of large language models coupled with conversational technology, e.g., Alexa, Siri, Cortana, and the Google Assistant, is changing the mode in which people make these inferences. We demonstrate that conversational modes of information provision, relative to traditional digital media, result in more critical responses to withheld information, including: (1) a reduction in evaluations of a product or service for which information is withheld and (2) an increased likelihood of recalling that information was withheld. These effects are robust across multiple conversational modes: a recorded phone conversation, an unfolding chat conversation, and a conversation script. We provide further evidence that these effects hold for conversations with the Google Assistant, a prominent conversational technology. The experimental results point to participants’ intuitions about why the information was withheld as the driver of the effect.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ustun, Volkan; Jorvekar, Ronit; Gurney, Nikolos; Pynadath, David; Wang, Yunzhe
Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent Proceedings Article
In: Proceedings of the 16th International Conference on Agents and Artificial Intelligence, pp. 313–320, SCITEPRESS - Science and Technology Publications, Rome, Italy, 2024, ISBN: 978-989-758-680-4.
@inproceedings{ustun_assessing_2024,
title = {Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent},
author = {Volkan Ustun and Ronit Jorvekar and Nikolos Gurney and David Pynadath and Yunzhe Wang},
url = {https://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0012388100003636},
doi = {10.5220/0012388100003636},
isbn = {978-989-758-680-4},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-19},
booktitle = {Proceedings of the 16th International Conference on Agents and Artificial Intelligence},
pages = {313–320},
publisher = {SCITEPRESS - Science and Technology Publications},
address = {Rome, Italy},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Ustun, Volkan
Spontaneous Theory of Mind for Artificial Intelligence Journal Article
In: 2024, (Publisher: arXiv, Version Number: 1).
@article{gurney_spontaneous_2024,
title = {Spontaneous Theory of Mind for Artificial Intelligence},
author = {Nikolos Gurney and David V. Pynadath and Volkan Ustun},
url = {https://arxiv.org/abs/2402.13272},
doi = {10.48550/ARXIV.2402.13272},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {Existing approaches to Theory of Mind (ToM) in Artificial Intelligence (AI) overemphasize prompted, or cue-based, ToM, which may limit our collective ability to develop Artificial Social Intelligence (ASI). Drawing from research in computer science, cognitive science, and related disciplines, we contrast prompted ToM with what we call spontaneous ToM – reasoning about others' mental states that is grounded in unintentional, possibly uncontrollable cognitive functions. We argue for a principled approach to studying and developing AI ToM and suggest that a robust, or general, ASI will respond to prompts and spontaneously engage in social reasoning.},
note = {Publisher: arXiv, Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Morstatter, Fred; Pynadath, David V.; Russell, Adam; Satyukov, Gleb
Operational Collective Intelligence of Humans and Machines Journal Article
In: 2024, (Publisher: arXiv, Version Number: 1).
@article{gurney_operational_2024,
title = {Operational Collective Intelligence of Humans and Machines},
author = {Nikolos Gurney and Fred Morstatter and David V. Pynadath and Adam Russell and Gleb Satyukov},
url = {https://arxiv.org/abs/2402.13273},
doi = {10.48550/ARXIV.2402.13273},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {We explore the use of aggregative crowdsourced forecasting (ACF) as a mechanism to help operationalize ``collective intelligence'' of human-machine teams for coordinated actions. We adopt the definition for Collective Intelligence as: ``A property of groups that emerges from synergies among data-information-knowledge, software-hardware, and individuals (those with new insights as well as recognized authorities) that enables just-in-time knowledge for better decisions than these three elements acting alone.'' Collective Intelligence emerges from new ways of connecting humans and AI to enable decision-advantage, in part by creating and leveraging additional sources of information that might otherwise not be included. Aggregative crowdsourced forecasting (ACF) is a recent key advancement towards Collective Intelligence wherein predictions (X% probability that Y will happen) and rationales (why I believe it is this probability that X will happen) are elicited independently from a diverse crowd, aggregated, and then used to inform higher-level decision-making. This research asks whether ACF, as a key way to enable Operational Collective Intelligence, could be brought to bear on operational scenarios (i.e., sequences of events with defined agents, components, and interactions) and decision-making, and considers whether such a capability could provide novel operational capabilities to enable new forms of decision-advantage.},
note = {Publisher: arXiv, Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
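
Illustrative sketch (not from the paper above): one common way to pool independent crowdsourced probability forecasts is an equal-weight average in log-odds space. The function name, weights, and sample numbers below are invented for illustration only.

import numpy as np

def aggregate_forecasts(probs, weights=None, eps=1e-6):
    # Pool independent probability forecasts of the same event by
    # averaging them in log-odds space (a common aggregation rule).
    p = np.clip(np.asarray(probs, dtype=float), eps, 1 - eps)
    w = np.ones_like(p) if weights is None else np.asarray(weights, dtype=float)
    w = w / w.sum()
    pooled_log_odds = np.sum(w * np.log(p / (1 - p)))
    return 1.0 / (1.0 + np.exp(-pooled_log_odds))

# Five forecasters each report an "X% probability that Y will happen"
print(aggregate_forecasts([0.60, 0.70, 0.55, 0.80, 0.65]))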
Ehsanpour, Mahsa; Reid, Ian; Rezatofighi, Hamid
Social-MAE: Social Masked Autoencoder for Multi-person Motion Representation Learning Miscellaneous
2024, (Version Number: 1).
@misc{ehsanpour_social-mae_2024,
title = {Social-MAE: Social Masked Autoencoder for Multi-person Motion Representation Learning},
author = {Mahsa Ehsanpour and Ian Reid and Hamid Rezatofighi},
url = {https://arxiv.org/abs/2404.05578},
doi = {10.48550/ARXIV.2404.05578},
year = {2024},
date = {2024-01-01},
urldate = {2024-07-12},
publisher = {arXiv},
abstract = {For a complete comprehension of multi-person scenes, it is essential to go beyond basic tasks like detection and tracking. Higher-level tasks, such as understanding the interactions and social activities among individuals, are also crucial. Progress towards models that can fully understand scenes involving multiple people is hindered by a lack of sufficient annotated data for such high-level tasks. To address this challenge, we introduce Social-MAE, a simple yet effective transformer-based masked autoencoder framework for multi-person human motion data. The framework uses masked modeling to pre-train the encoder to reconstruct masked human joint trajectories, enabling it to learn generalizable and data efficient representations of motion in human crowded scenes. Social-MAE comprises a transformer as the MAE encoder and a lighter-weight transformer as the MAE decoder which operates on multi-person joints' trajectory in the frequency domain. After the reconstruction task, the MAE decoder is replaced with a task-specific decoder and the model is fine-tuned end-to-end for a variety of high-level social tasks. Our proposed model combined with our pre-training approach achieves the state-of-the-art results on various high-level social tasks, including multi-person pose forecasting, social grouping, and social action understanding. These improvements are demonstrated across four popular multi-person datasets encompassing both human 2D and 3D body pose.},
note = {Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
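
Illustrative sketch (not the authors' code): a toy masked autoencoder over multi-person joint trajectories in the spirit of the abstract above, assuming PyTorch. Model sizes, masking ratio, and tensor shapes are invented, and the sketch works on raw trajectories rather than the frequency-domain representation the paper describes.

import torch
import torch.nn as nn

class TinySocialMAE(nn.Module):
    # Toy masked autoencoder: a transformer encoder, a lighter decoder, and a
    # reconstruction loss computed only on masked trajectory tokens.
    def __init__(self, in_dim=34, d_model=64, n_heads=4):
        super().__init__()
        self.embed = nn.Linear(in_dim, d_model)
        enc_layer = nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
        self.encoder = nn.TransformerEncoder(enc_layer, num_layers=2)
        dec_layer = nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
        self.decoder = nn.TransformerEncoder(dec_layer, num_layers=1)
        self.head = nn.Linear(d_model, in_dim)

    def forward(self, x, mask):
        # x: (batch, tokens, in_dim); mask: (batch, tokens) bool, True = masked
        tokens = self.embed(x.masked_fill(mask.unsqueeze(-1), 0.0))
        recon = self.head(self.decoder(self.encoder(tokens)))
        return ((recon - x) ** 2)[mask].mean()

x = torch.randn(8, 3 * 30, 34)        # 3 people, 30 frames, 17 joints in 2D
mask = torch.rand(8, 3 * 30) < 0.5    # mask half the tokens
loss = TinySocialMAE()(x, mask)
loss.backward()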
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
The Role of Heuristics and Biases during Complex Choices with an AI Teammate Journal Article
In: AAAI, vol. 37, no. 5, pp. 5993–6001, 2023, ISSN: 2374-3468, 2159-5399.
@article{gurney_role_2023,
title = {The Role of Heuristics and Biases during Complex Choices with an AI Teammate},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/25741},
doi = {10.1609/aaai.v37i5.25741},
issn = {2374-3468, 2159-5399},
year = {2023},
date = {2023-06-01},
urldate = {2023-12-08},
journal = {AAAI},
volume = {37},
number = {5},
pages = {5993–6001},
abstract = {Behavioral scientists have classically documented aversion to algorithmic decision aids, from simple linear models to AI. Sentiment, however, is changing and possibly accelerating AI helper usage. AI assistance is, arguably, most valuable when humans must make complex choices. We argue that classic experimental methods used to study heuristics and biases are insufficient for studying complex choices made with AI helpers. We adapted an experimental paradigm designed for studying complex choices in such contexts. We show that framing and anchoring effects impact how people work with an AI helper and are predictive of choice outcomes. The evidence suggests that some participants, particularly those in a loss frame, put too much faith in the AI helper and experienced worse choice outcomes by doing so. The paradigm also generates computational modeling-friendly data allowing future studies of human-AI decision making.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
@inproceedings{pynadath_effectiveness_2023,
title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task},
author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
year = {2023},
date = {2023-05-01},
booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {2334–2336},
abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17]. We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
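
Illustrative sketch (not the paper's agent): a toy Bayesian belief update over a hidden teamwork state followed by a one-step expected-reward choice of intervention, in the spirit of the POMDP-style reasoning described in the abstract above. The states, observation model, and rewards are all invented.

import numpy as np

states = ["coordinated", "uncoordinated"]
belief = np.array([0.5, 0.5])

# P(observation | state) for two possible teamwork-level observations
obs_model = {"fast_evacuation": np.array([0.8, 0.3]),
             "slow_evacuation": np.array([0.2, 0.7])}

# Expected reward of each intervention in each state
reward = {"do_nothing":  np.array([1.0, -1.0]),
          "prompt_team": np.array([0.5,  0.8])}

def update(belief, observation):
    # Bayes rule: posterior proportional to likelihood times prior
    posterior = obs_model[observation] * belief
    return posterior / posterior.sum()

belief = update(belief, "slow_evacuation")
best = max(reward, key=lambda a: float(reward[a] @ belief))
print(belief, best)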
Pynadath, David V.; Dilkina, Bistra; Jeong, David C.; John, Richard S.; Marsella, Stacy C.; Merchant, Chirag; Miller, Lynn C.; Read, Stephen J.
Disaster world Journal Article
In: Comput Math Organ Theory, vol. 29, no. 1, pp. 84–117, 2023, ISSN: 1572-9346.
@article{pynadath_disaster_2023,
title = {Disaster world},
author = {David V. Pynadath and Bistra Dilkina and David C. Jeong and Richard S. John and Stacy C. Marsella and Chirag Merchant and Lynn C. Miller and Stephen J. Read},
url = {https://doi.org/10.1007/s10588-022-09359-y},
doi = {10.1007/s10588-022-09359-y},
issn = {1572-9346},
year = {2023},
date = {2023-03-01},
urldate = {2023-03-31},
journal = {Comput Math Organ Theory},
volume = {29},
number = {1},
pages = {84–117},
abstract = {Artificial intelligence (AI) research provides a rich source of modeling languages capable of generating socially plausible simulations of human behavior, while also providing a transparent ground truth that can support validation of social-science methods applied to that simulation. In this work, we leverage two established AI representations: decision-theoretic planning and recursive modeling. Decision-theoretic planning (specifically Partially Observable Markov Decision Processes) provides agents with quantitative models of their corresponding real-world entities’ subjective (and possibly incorrect) perspectives of ground truth in the form of probabilistic beliefs and utility functions. Recursive modeling gives an agent a theory of mind, which is necessary when a person’s (again, possibly incorrect) subjective perspectives are of another person, rather than of just his/her environment. We used PsychSim, a multiagent social-simulation framework combining these two AI frameworks, to build a general parameterized model of human behavior during disaster response, grounding the model in social-psychological theories to ensure social plausibility. We then instantiated that model into alternate ground truths for simulating population response to a series of natural disasters, namely, hurricanes. The simulations generate data in response to socially plausible instruments (e.g., surveys) that serve as input to the Ground Truth program’s designated research teams for them to conduct simulated social science. The simulation also provides a graphical ground truth and a set of outcomes to be used as the gold standard in evaluating the research teams’ inferences.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wu, Haochen; Sequeira, Pedro; Pynadath, David V.
Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning Journal Article
In: 2023, (Publisher: arXiv, Version Number: 2).
@article{wu_multiagent_2023,
title = {Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning},
author = {Haochen Wu and Pedro Sequeira and David V. Pynadath},
url = {https://arxiv.org/abs/2302.10238},
doi = {10.48550/ARXIV.2302.10238},
year = {2023},
date = {2023-02-01},
urldate = {2023-08-24},
abstract = {We approach the problem of understanding how people interact with each other in collaborative settings, especially when individuals know little about their teammates, via Multiagent Inverse Reinforcement Learning (MIRL), where the goal is to infer the reward functions guiding the behavior of each individual given trajectories of a team's behavior during some task. Unlike current MIRL approaches, we do not assume that team members know each other's goals a priori; rather, that they collaborate by adapting to the goals of others perceived by observing their behavior, all while jointly performing a task. To address this problem, we propose a novel approach to MIRL via Theory of Mind (MIRL-ToM). For each agent, we first use ToM reasoning to estimate a posterior distribution over baseline reward profiles given their demonstrated behavior. We then perform MIRL via decentralized equilibrium by employing single-agent Maximum Entropy IRL to infer a reward function for each agent, where we simulate the behavior of other teammates according to the time-varying distribution over profiles. We evaluate our approach in a simulated 2-player search-and-rescue operation where the goal of the agents, playing different roles, is to search for and evacuate victims in the environment. Our results show that the choice of baseline profiles is paramount to the recovery of the ground-truth rewards, and that MIRL-ToM is able to recover the rewards used by agents interacting both with known and unknown teammates.},
note = {Publisher: arXiv, Version Number: 2},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
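
Illustrative sketch (not the authors' method): the Theory-of-Mind step described above reduced to a toy Bayesian update over candidate reward profiles, assuming Boltzmann-rational action choice. The profiles, actions, and temperature are invented.

import numpy as np

actions = ["search_room", "evacuate_victim"]
profiles = {"searcher": {"search_room": 1.0, "evacuate_victim": 0.2},
            "rescuer":  {"search_room": 0.2, "evacuate_victim": 1.0}}

def action_likelihood(profile, action, beta=3.0):
    # P(action | profile) under a softmax (Boltzmann) choice model
    utilities = np.array([profiles[profile][a] for a in actions])
    p = np.exp(beta * utilities)
    p /= p.sum()
    return p[actions.index(action)]

posterior = {name: 1 / len(profiles) for name in profiles}
for observed in ["evacuate_victim", "evacuate_victim", "search_room"]:
    posterior = {name: posterior[name] * action_likelihood(name, observed) for name in profiles}
    total = sum(posterior.values())
    posterior = {name: value / total for name, value in posterior.items()}
print(posterior)  # mass concentrates on the "rescuer" profile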
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
@incollection{gurney_comparing_2023,
title = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2302.01854},
doi = {10.1007/978-3-031-30933-5_12},
year = {2023},
date = {2023-02-01},
urldate = {2023-08-15},
volume = {13832},
pages = {175–197},
abstract = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
note = {arXiv:2302.01854 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
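
Illustrative sketch (not the paper's analysis): comparing a pre-interaction survey score with a simple behavioral feature as predictors of compliance, assuming scikit-learn and synthetic data. Every variable below is invented; only the comparison pattern mirrors the abstract above.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)
n = 300
survey_trust = rng.normal(0, 1, n)                        # self-report inventory score
early_compliance = rng.binomial(1, 0.5, n).astype(float)  # behavior in first interaction
# Synthetic ground truth: later compliance driven mostly by early behavior
p = 1 / (1 + np.exp(-(0.3 * survey_trust + 1.5 * early_compliance - 0.8)))
compliance = rng.binomial(1, p)

for name, feature in [("survey", survey_trust), ("behavioral", early_compliance)]:
    acc = cross_val_score(LogisticRegression(), feature.reshape(-1, 1), compliance, cv=5).mean()
    print(name, round(acc, 3))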
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {14051},
pages = {232–248},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experience better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcome – which were the result of their own behavior – with the same agent. Our analyses suggest the need for augmentation of models so that they account for such biased perceptions as well as mechanisms so that agents can detect and even actively work to correct this and similar biases of users.},
note = {arXiv:2301.09011 [cs]},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V.; Gurney, Nikolos; Wang, Ning
Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 749–756, 2022, (ISSN: 1944-9437).
@inproceedings{pynadath_explainable_2022,
title = {Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency},
author = {David V. Pynadath and Nikolos Gurney and Ning Wang},
doi = {10.1109/RO-MAN53752.2022.9900608},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {749–756},
abstract = {Understanding the decisions of AI-driven systems and the rationale behind such decisions is key to the success of the human-robot team. However, the complexity and the "black-box" nature of many AI algorithms create a barrier for establishing such understanding within their human counterparts. Reinforcement Learning (RL), a machine-learning algorithm based on the simple idea of action-reward mappings, has a rich quantitative representation and a complex iterative reasoning process that present a significant obstacle to human understanding of, for example, how value functions are constructed, how the algorithms update the value functions, and how such updates impact the action/policy chosen by the robot. In this paper, we discuss our work to address this challenge by developing a decision-tree based explainable model for RL to make a robot’s decision-making process more transparent. Set in a human-robot virtual teaming testbed, we conducted a study to assess the impact of the explanations, generated using decision trees, on building transparency, calibrating trust, and improving the overall human-robot team’s performance. We discuss the design of the explainable model and the positive impact of the explanations on outcome measures.},
note = {ISSN: 1944-9437},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
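
Illustrative sketch (not the paper's model): one way to make a learned policy's recommendations readable is to distill them into a shallow decision tree, assuming scikit-learn. The features, labels, and the stand-in policy below are invented.

import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_text

rng = np.random.default_rng(0)
X = rng.uniform(0, 1, size=(500, 2))   # features: [threat_level, battery]
# Stand-in for a policy learned by RL: recommend protection when the threat
# is high and the robot has enough battery
actions = np.where((X[:, 0] > 0.6) & (X[:, 1] > 0.3), 1, 0)

tree = DecisionTreeClassifier(max_depth=2).fit(X, actions)
print(export_text(tree, feature_names=["threat_level", "battery"]))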
Gurney, Nikolos; Pynadath, David V.
Robots with Theory of Mind for Humans: A Survey Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 993–1000, 2022, (ISSN: 1944-9437).
@inproceedings{gurney_robots_2022,
title = {Robots with Theory of Mind for Humans: A Survey},
author = {Nikolos Gurney and David V. Pynadath},
url = {https://ieeexplore.ieee.org/abstract/document/9900662},
doi = {10.1109/RO-MAN53752.2022.9900662},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {993–1000},
abstract = {Theory of Mind (ToM) is a psychological construct that captures the ability to ascribe mental states to others and then use those representations for explaining and predicting behavior. We review recent progress in endowing artificially intelligent robots with ToM. A broad array of modeling, experimental, and benchmarking approaches and methods are present in the extant literature. Unlike other domains of human cognition for which research has achieved super-human capabilities, ToM for robots lacks a unified construct and is not consistently benchmarked or validated—realities which possibly hinder progress in this domain. We argue that this is, at least in part, due to inconsistent defining of ToM, no presence of a unifying modeling construct, and the absence of a shared data resource. We believe these would improve the ability of the research community to compare the ToM abilities of different systems. We suggest that establishing a shared definition of ToM, creating a shared data resource that supports consistent benchmarking & validation, and developing a generalized modeling tool are critical steps towards giving robots ToM capabilities that lay observers will recognize as such.},
note = {ISSN: 1944-9437},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
The Impact of Personalized Feedback on Negotiation Training Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 9, pp. 92–104, US Army Combat Capabilities Development Command–Soldier Center, 2022.
@incollection{johnson_impact_2022,
title = {The Impact of Personalized Feedback on Negotiation Training},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://adlnet.gov/assets/uploads/Vol%209_CompetencyBasedScenarioDesignBook_Complete_Final_021722v2.pdf#page=93},
year = {2022},
date = {2022-02-01},
urldate = {2022-02-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {9},
pages = {92–104},
publisher = {US Army Combat Capabilities Development Command–Soldier Center},
series = {Competency-Based Scenario Design},
abstract = {Intelligent tutoring systems (ITSs) have made great strides in teaching cognitive skills, including math (Koedinger et al., 1997; Koedinger & Corbett, 2005; Koedinger & Corbett, 2006), reading (Mills-Tettey, et al., 2009; Wijekumar et al., 2005;) and computer literacy (Guo, 2015; Olney et al., 2017;). Recent research has begun to extend these techniques to interpersonal skills such as public speaking (Chollet et al., 2014), medical interviews (Pataki, 2012; Stevens, 2006), collaborative problem solving (Graesser et al., 2018) and negotiation (Gratch et al., 2016; Kim et al., 2009). An extensive body of research has documented the benefits of ITSs for cognitive skill development, but relative to this, research on ITSs for interpersonal skills is still in its infancy. This chapter highlights our efforts in adapting ITS techniques to teaching negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
pages = {661–674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Proceedings Article
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
@inproceedings{gurney_experimental_2022,
title = {An Experimental Method for Studying Complex Choices},
author = {Nikolos Gurney and Tyler King and John H. Miller},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
doi = {10.1007/978-3-031-19679-9_6},
isbn = {978-3-031-19679-9},
year = {2022},
date = {2022-01-01},
booktitle = {HCI International 2022 – Late Breaking Posters},
pages = {39–45},
publisher = {Springer Nature Switzerland},
address = {Cham},
series = {Communications in Computer and Information Science},
abstract = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggest a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
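
Illustrative sketch (not the study's testbed): a toy payoff function in the "tuning dials" spirit of the abstract above, where each dial contributes nonlinearly and dials interact, so adjusting one dial at a time can stall short of the optimum. All constants are invented.

import numpy as np

def payoff(dials):
    # dial settings in [0, 1]; nonlinear per-dial effects plus an interaction term
    d = np.asarray(dials, dtype=float)
    main_effects = np.sum(np.sin(3 * np.pi * d))
    interaction = -2.0 * np.abs(d[0] - d[1]) * d[2]
    return main_effects + interaction

# Two candidate settings of three dials
print(payoff([0.20, 0.80, 0.50]), payoff([0.17, 0.17, 0.00]))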
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic Proceedings Article
In: Kurosu, Masaaki (Ed.): Human-Computer Interaction. User Experience and Behavior, pp. 580–590, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05412-9.
@inproceedings{wang_toward_2022,
title = {Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05412-9_39},
doi = {10.1007/978-3-031-05412-9_39},
isbn = {978-3-031-05412-9},
year = {2022},
date = {2022-01-01},
booktitle = {Human-Computer Interaction. User Experience and Behavior},
pages = {580–590},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal charismatic strategies based on the research on charismatic leaders, which was then used to re-write an existing tutorial on the human circulatory system to express charisma. We then collected voice recordings of the tutorial in both charismatic and non-charismatic voices using actors from a crowd-sourcing platform. In this paper, we present the analysis of the charismatic and non-charismatic voice recordings, and discuss what nonverbal behaviors in speeches contribute to perceived charisma. Results can shed light on the synthesis of charismatic speeches for virtual characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Measuring and Predicting Human Trust in Recommendations from an AI Teammate Proceedings Article
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, pp. 22–34, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05643-7.
@inproceedings{gurney_measuring_2022,
title = {Measuring and Predicting Human Trust in Recommendations from an AI Teammate},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05643-7_2},
doi = {10.1007/978-3-031-05643-7_2},
isbn = {978-3-031-05643-7},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in HCI},
pages = {22–34},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Predicting compliance with AI recommendations and knowing when to intervene are critical facets of human-AI teaming. AIs are typically deployed in settings where their abilities to evaluate decision variables far exceed the abilities of their human counterparts. However, even though AIs excel at weighing multiple issues and computing near optimal solutions with speed and accuracy beyond that of any human, they still make mistakes. Thus, perfect compliance may be undesirable. This means, just as individuals must know when to follow the advice of other people, it is critical for them to know when to adopt the recommendations from their AI. Well-calibrated trust is thought to be a fundamental aspect of this type of knowledge. We compare the ability of a common trust inventory and the ability of a behavioral measure of trust to predict compliance and success in a reconnaissance mission. We interpret the experimental results to suggest that the behavioral measure is a better predictor of overall mission compliance and success. We discuss how this measure could possibly be used in compliance interventions and related open questions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Yunzhe; Gurney, Nikolos; Zhou, Jincheng; Pynadath, David V.; Ustun, Volkan
Route Optimization in Service of a Search and Rescue Artificial Social Intelligence Agent Book Section
In: Gurney, Nikolos; Sukthankar, Gita (Ed.): Computational Theory of Mind for Human-Machine Teams, vol. 13775, pp. 220–228, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-21670-1, 978-3-031-21671-8, (Series Title: Lecture Notes in Computer Science).
@incollection{gurney_route_2022,
title = {Route Optimization in Service of a Search and Rescue Artificial Social Intelligence Agent},
author = {Yunzhe Wang and Nikolos Gurney and Jincheng Zhou and David V. Pynadath and Volkan Ustun},
editor = {Nikolos Gurney and Gita Sukthankar},
url = {https://link.springer.com/10.1007/978-3-031-21671-8_14},
doi = {10.1007/978-3-031-21671-8_14},
isbn = {978-3-031-21670-1, 978-3-031-21671-8},
year = {2022},
date = {2022-01-01},
urldate = {2023-02-10},
booktitle = {Computational Theory of Mind for Human-Machine Teams},
volume = {13775},
pages = {220–228},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Filter
2024
Gurney, Nikolos; Loewenstein, George; Chater, Nick
Conversational technology and reactions to withheld information Journal Article
In: PLoS ONE, vol. 19, no. 4, pp. e0301382, 2024, ISSN: 1932-6203.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@article{gurney_conversational_2024,
title = {Conversational technology and reactions to withheld information},
author = {Nikolos Gurney and George Loewenstein and Nick Chater},
editor = {Petre Caraiani},
url = {https://dx.plos.org/10.1371/journal.pone.0301382},
doi = {10.1371/journal.pone.0301382},
issn = {1932-6203},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-16},
journal = {PLoS ONE},
volume = {19},
number = {4},
pages = {e0301382},
abstract = {People frequently face decisions that require making inferences about withheld information. The advent of large language models coupled with conversational technology, e.g., Alexa, Siri, Cortana, and the Google Assistant, is changing the mode in which people make these inferences. We demonstrate that conversational modes of information provision, relative to traditional digital media, result in more critical responses to withheld information, including: (1) a reduction in evaluations of a product or service for which information is withheld and (2) an increased likelihood of recalling that information was withheld. These effects are robust across multiple conversational modes: a recorded phone conversation, an unfolding chat conversation, and a conversation script. We provide further evidence that these effects hold for conversations with the Google Assistant, a prominent conversational technology. The experimental results point to participants’ intuitions about why the information was withheld as the driver of the effect.},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Ustun, Volkan; Jorvekar, Ronit; Gurney, Nikolos; Pynadath, David; Wang, Yunzhe
Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent: Proceedings Article
In: Proceedings of the 16th International Conference on Agents and Artificial Intelligence, pp. 313–320, SCITEPRESS - Science and Technology Publications, Rome, Italy, 2024, ISBN: 978-989-758-680-4.
Links | BibTeX | Tags: AI, Cognitive Architecture, Social Simulation
@inproceedings{ustun_assessing_2024,
title = {Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent:},
author = {Volkan Ustun and Ronit Jorvekar and Nikolos Gurney and David Pynadath and Yunzhe Wang},
url = {https://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0012388100003636},
doi = {10.5220/0012388100003636},
isbn = {978-989-758-680-4},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-19},
booktitle = {Proceedings of the 16th International Conference on Agents and Artificial Intelligence},
pages = {313–320},
publisher = {SCITEPRESS - Science and Technology Publications},
address = {Rome, Italy},
keywords = {AI, Cognitive Architecture, Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Ustun, Volkan
Spontaneous Theory of Mind for Artificial Intelligence Journal Article
In: 2024, (Publisher: [object Object] Version Number: 1).
Abstract | Links | BibTeX | Tags: AI, DTIC, Social Simulation, UARC
@article{gurney_spontaneous_2024,
title = {Spontaneous Theory of Mind for Artificial Intelligence},
author = {Nikolos Gurney and David V. Pynadath and Volkan Ustun},
url = {https://arxiv.org/abs/2402.13272},
doi = {10.48550/ARXIV.2402.13272},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {Existing approaches to Theory of Mind (ToM) in Artificial Intelligence (AI) overemphasize prompted, or cue-based, ToM, which may limit our collective ability to develop Artificial Social Intelligence (ASI). Drawing from research in computer science, cognitive science, and related disciplines, we contrast prompted ToM with what we call spontaneous ToM – reasoning about others' mental states that is grounded in unintentional, possibly uncontrollable cognitive functions. We argue for a principled approach to studying and developing AI ToM and suggest that a robust, or general, ASI will respond to prompts textbackslashtextitand spontaneously engage in social reasoning.},
note = {Publisher: [object Object]
Version Number: 1},
keywords = {AI, DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Morstatter, Fred; Pynadath, David V.; Russell, Adam; Satyukov, Gleb
Operational Collective Intelligence of Humans and Machines Journal Article
In: 2024, (Publisher: [object Object] Version Number: 1).
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@article{gurney_operational_2024,
title = {Operational Collective Intelligence of Humans and Machines},
author = {Nikolos Gurney and Fred Morstatter and David V. Pynadath and Adam Russell and Gleb Satyukov},
url = {https://arxiv.org/abs/2402.13273},
doi = {10.48550/ARXIV.2402.13273},
year = {2024},
date = {2024-02-01},
urldate = {2024-03-14},
abstract = {We explore the use of aggregative crowdsourced forecasting (ACF) as a mechanism to help operationalize ``collective intelligence'' of human-machine teams for coordinated actions. We adopt the definition for Collective Intelligence as: ``A property of groups that emerges from synergies among data-information-knowledge, software-hardware, and individuals (those with new insights as well as recognized authorities) that enables just-in-time knowledge for better decisions than these three elements acting alone.'' Collective Intelligence emerges from new ways of connecting humans and AI to enable decision-advantage, in part by creating and leveraging additional sources of information that might otherwise not be included. Aggregative crowdsourced forecasting (ACF) is a recent key advancement towards Collective Intelligence wherein predictions (Xtextbackslash% probability that Y will happen) and rationales (why I believe it is this probability that X will happen) are elicited independently from a diverse crowd, aggregated, and then used to inform higher-level decision-making. This research asks whether ACF, as a key way to enable Operational Collective Intelligence, could be brought to bear on operational scenarios (i.e., sequences of events with defined agents, components, and interactions) and decision-making, and considers whether such a capability could provide novel operational capabilities to enable new forms of decision-advantage.},
note = {Publisher: [object Object]
Version Number: 1},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Ehsanpour, Mahsa; Reid, Ian; Rezatofighi, Hamid
Social-MAE: Social Masked Autoencoder for Multi-person Motion Representation Learning Miscellaneous
2024, (Version Number: 1).
Abstract | Links | BibTeX | Tags: Social Simulation
@misc{ehsanpour_social-mae_2024,
title = {Social-MAE: Social Masked Autoencoder for Multi-person Motion Representation Learning},
author = {Mahsa Ehsanpour and Ian Reid and Hamid Rezatofighi},
url = {https://arxiv.org/abs/2404.05578},
doi = {10.48550/ARXIV.2404.05578},
year = {2024},
date = {2024-01-01},
urldate = {2024-07-12},
publisher = {arXiv},
abstract = {For a complete comprehension of multi-person scenes, it is essential to go beyond basic tasks like detection and tracking. Higher-level tasks, such as understanding the interactions and social activities among individuals, are also crucial. Progress towards models that can fully understand scenes involving multiple people is hindered by a lack of sufficient annotated data for such high-level tasks. To address this challenge, we introduce Social-MAE, a simple yet effective transformer-based masked autoencoder framework for multi-person human motion data. The framework uses masked modeling to pre-train the encoder to reconstruct masked human joint trajectories, enabling it to learn generalizable and data efficient representations of motion in human crowded scenes. Social-MAE comprises a transformer as the MAE encoder and a lighter-weight transformer as the MAE decoder which operates on multi-person joints' trajectory in the frequency domain. After the reconstruction task, the MAE decoder is replaced with a task-specific decoder and the model is fine-tuned end-to-end for a variety of high-level social tasks. Our proposed model combined with our pre-training approach achieves the state-of-the-art results on various high-level social tasks, including multi-person pose forecasting, social grouping, and social action understanding. These improvements are demonstrated across four popular multi-person datasets encompassing both human 2D and 3D body pose.},
note = {Version Number: 1},
keywords = {Social Simulation},
pubstate = {published},
tppubtype = {misc}
}
2023
Gurney, Nikolos; Miller, John H.; Pynadath, David V.
The Role of Heuristics and Biases during Complex Choices with an AI Teammate Journal Article
In: AAAI, vol. 37, no. 5, pp. 5993–6001, 2023, ISSN: 2374-3468, 2159-5399.
Abstract | Links | BibTeX | Tags: AI, DTIC, Social Simulation, UARC
@article{gurney_role_2023,
title = {The Role of Heuristics and Biases during Complex Choices with an AI Teammate},
author = {Nikolos Gurney and John H. Miller and David V. Pynadath},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/25741},
doi = {10.1609/aaai.v37i5.25741},
issn = {2374-3468, 2159-5399},
year = {2023},
date = {2023-06-01},
urldate = {2023-12-08},
journal = {AAAI},
volume = {37},
number = {5},
pages = {5993–6001},
abstract = {Behavioral scientists have classically documented aversion to algorithmic decision aids, from simple linear models to AI. Sentiment, however, is changing and possibly accelerating AI helper usage. AI assistance is, arguably, most valuable when humans must make complex choices. We argue that classic experimental methods used to study heuristics and biases are insufficient for studying complex choices made with AI helpers. We adapted an experimental paradigm designed for studying complex choices in such contexts. We show that framing and anchoring effects impact how people work with an AI helper and are predictive of choice outcomes. The evidence suggests that some participants, particularly those in a loss frame, put too much faith in the AI helper and experienced worse choice outcomes by doing so. The paradigm also generates computational modeling-friendly data allowing future studies of human-AI decision making.},
keywords = {AI, DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. Pages 2334–2336, 2023.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{pynadath_effectiveness_2023,
title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task},
author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
year = {2023},
date = {2023-05-01},
booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {Pages 2334–2336},
abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Dilkina, Bistra; Jeong, David C.; John, Richard S.; Marsella, Stacy C.; Merchant, Chirag; Miller, Lynn C.; Read, Stephen J.
Disaster world Journal Article
In: Comput Math Organ Theory, vol. 29, no. 1, pp. 84–117, 2023, ISSN: 1572-9346.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@article{pynadath_disaster_2023,
title = {Disaster world},
author = {David V. Pynadath and Bistra Dilkina and David C. Jeong and Richard S. John and Stacy C. Marsella and Chirag Merchant and Lynn C. Miller and Stephen J. Read},
url = {https://doi.org/10.1007/s10588-022-09359-y},
doi = {10.1007/s10588-022-09359-y},
issn = {1572-9346},
year = {2023},
date = {2023-03-01},
urldate = {2023-03-31},
journal = {Comput Math Organ Theory},
volume = {29},
number = {1},
pages = {84–117},
abstract = {Artificial intelligence (AI) research provides a rich source of modeling languages capable of generating socially plausible simulations of human behavior, while also providing a transparent ground truth that can support validation of social-science methods applied to that simulation. In this work, we leverage two established AI representations: decision-theoretic planning and recursive modeling. Decision-theoretic planning (specifically Partially Observable Markov Decision Processes) provides agents with quantitative models of their corresponding real-world entities’ subjective (and possibly incorrect) perspectives of ground truth in the form of probabilistic beliefs and utility functions. Recursive modeling gives an agent a theory of mind, which is necessary when a person’s (again, possibly incorrect) subjective perspectives are of another person, rather than of just his/her environment. We used PsychSim, a multiagent social-simulation framework combining these two AI frameworks, to build a general parameterized model of human behavior during disaster response, grounding the model in social-psychological theories to ensure social plausibility. We then instantiated that model into alternate ground truths for simulating population response to a series of natural disasters, namely, hurricanes. The simulations generate data in response to socially plausible instruments (e.g., surveys) that serve as input to the Ground Truth program’s designated research teams for them to conduct simulated social science. The simulation also provides a graphical ground truth and a set of outcomes to be used as the gold standard in evaluating the research teams’ inferences.},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Wu, Haochen; Sequeira, Pedro; Pynadath, David V.
Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: AI, DTIC, Social Simulation
@article{wu_multiagent_2023,
title = {Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning},
author = {Haochen Wu and Pedro Sequeira and David V. Pynadath},
url = {https://arxiv.org/abs/2302.10238},
doi = {10.48550/ARXIV.2302.10238},
year = {2023},
date = {2023-02-01},
urldate = {2023-08-24},
abstract = {We approach the problem of understanding how people interact with each other in collaborative settings, especially when individuals know little about their teammates, via Multiagent Inverse Reinforcement Learning (MIRL), where the goal is to infer the reward functions guiding the behavior of each individual given trajectories of a team's behavior during some task. Unlike current MIRL approaches, we do not assume that team members know each other's goals a priori; rather, that they collaborate by adapting to the goals of others perceived by observing their behavior, all while jointly performing a task. To address this problem, we propose a novel approach to MIRL via Theory of Mind (MIRL-ToM). For each agent, we first use ToM reasoning to estimate a posterior distribution over baseline reward profiles given their demonstrated behavior. We then perform MIRL via decentralized equilibrium by employing single-agent Maximum Entropy IRL to infer a reward function for each agent, where we simulate the behavior of other teammates according to the time-varying distribution over profiles. We evaluate our approach in a simulated 2-player search-and-rescue operation where the goal of the agents, playing different roles, is to search for and evacuate victims in the environment. Our results show that the choice of baseline profiles is paramount to the recovery of the ground-truth rewards, and that MIRL-ToM is able to recover the rewards used by agents interacting both with known and unknown teammates.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {AI, DTIC, Social Simulation},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC, Social Simulation, UARC
@incollection{gurney_comparing_2023,
title = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2302.01854},
doi = {10.1007/978-3-031-30933-5_12},
year = {2023},
date = {2023-02-01},
urldate = {2023-08-15},
volume = {13832},
pages = {175–197},
abstract = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
note = {arXiv:2302.01854 [cs]},
keywords = {AI, DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {14051},
pages = {232–248},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experienced better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcomes – which were the result of their own behavior – with the same agent. Our analyses suggest the need to augment models so that they account for such biased perceptions, as well as mechanisms that allow agents to detect and even actively work to correct this and similar user biases.},
note = {arXiv:2301.09011 [cs]},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2022
Pynadath, David V.; Gurney, Nikolos; Wang, Ning
Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 749–756, 2022, (ISSN: 1944-9437).
@inproceedings{pynadath_explainable_2022,
title = {Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency},
author = {David V. Pynadath and Nikolos Gurney and Ning Wang},
doi = {10.1109/RO-MAN53752.2022.9900608},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {749–756},
abstract = {Understanding the decisions of AI-driven systems and the rationale behind such decisions is key to the success of the human-robot team. However, the complexity and the "black-box" nature of many AI algorithms create a barrier for establishing such understanding within their human counterparts. Reinforcement Learning (RL), a machine-learning algorithm based on the simple idea of action-reward mappings, has a rich quantitative representation and a complex iterative reasoning process that present a significant obstacle to human understanding of, for example, how value functions are constructed, how the algorithms update the value functions, and how such updates impact the action/policy chosen by the robot. In this paper, we discuss our work to address this challenge by developing a decision-tree based explainable model for RL to make a robot’s decision-making process more transparent. Set in a human-robot virtual teaming testbed, we conducted a study to assess the impact of the explanations, generated using decision trees, on building transparency, calibrating trust, and improving the overall human-robot team’s performance. We discuss the design of the explainable model and the positive impact of the explanations on outcome measures.},
note = {ISSN: 1944-9437},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
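The explainable-RL work above makes a learned policy transparent through decision-tree explanations. A minimal sketch of that general idea, assuming scikit-learn is available, is to fit a shallow tree that imitates a policy's action choices and print its rules; the toy policy and state features below are placeholders, not the human-robot teaming testbed used in the paper.

# Illustrative sketch (not the paper's implementation): approximate a policy
# with a shallow decision tree so its action choices can be explained as rules.
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_text

rng = np.random.default_rng(0)

# Toy state features: [distance_to_target, battery_level, threat_level]
states = rng.uniform(0, 1, size=(500, 3))

def toy_policy(s):
    """Stand-in for an RL policy's greedy action in each state."""
    if s[2] > 0.7:
        return "retreat"
    return "advance" if s[1] > 0.3 else "recharge"

actions = np.array([toy_policy(s) for s in states])

# Fit a small, human-readable surrogate of the policy.
surrogate = DecisionTreeClassifier(max_depth=2, random_state=0)
surrogate.fit(states, actions)

# The printed rules serve as a transparent explanation of the policy's behavior.
print(export_text(surrogate, feature_names=["distance", "battery", "threat"]))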
Gurney, Nikolos; Pynadath, David V.
Robots with Theory of Mind for Humans: A Survey Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 993–1000, 2022, (ISSN: 1944-9437).
@inproceedings{gurney_robots_2022,
title = {Robots with Theory of Mind for Humans: A Survey},
author = {Nikolos Gurney and David V. Pynadath},
url = {https://ieeexplore.ieee.org/abstract/document/9900662},
doi = {10.1109/RO-MAN53752.2022.9900662},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {993–1000},
abstract = {Theory of Mind (ToM) is a psychological construct that captures the ability to ascribe mental states to others and then use those representations for explaining and predicting behavior. We review recent progress in endowing artificially intelligent robots with ToM. A broad array of modeling, experimental, and benchmarking approaches and methods are present in the extant literature. Unlike other domains of human cognition for which research has achieved super-human capabilities, ToM for robots lacks a unified construct and is not consistently benchmarked or validated, realities that possibly hinder progress in this domain. We argue that this is, at least in part, due to inconsistent definitions of ToM, the lack of a unifying modeling construct, and the absence of a shared data resource. We believe addressing these gaps would improve the ability of the research community to compare the ToM abilities of different systems. We suggest that establishing a shared definition of ToM, creating a shared data resource that supports consistent benchmarking & validation, and developing a generalized modeling tool are critical steps towards giving robots ToM capabilities that lay observers will recognize as such.},
note = {ISSN: 1944-9437},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Dilkina, Bistra; Jeong, David C.; John, Richard S.; Marsella, Stacy C.; Merchant, Chirag; Miller, Lynn C.; Read, Stephen J.
Disaster world Journal Article
In: Comput Math Organ Theory, 2022, ISSN: 1572-9346.
@article{pynadath_disaster_2022,
title = {Disaster world},
author = {David V. Pynadath and Bistra Dilkina and David C. Jeong and Richard S. John and Stacy C. Marsella and Chirag Merchant and Lynn C. Miller and Stephen J. Read},
url = {https://doi.org/10.1007/s10588-022-09359-y},
doi = {10.1007/s10588-022-09359-y},
issn = {1572-9346},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-28},
journal = {Comput Math Organ Theory},
abstract = {Artificial intelligence (AI) research provides a rich source of modeling languages capable of generating socially plausible simulations of human behavior, while also providing a transparent ground truth that can support validation of social-science methods applied to that simulation. In this work, we leverage two established AI representations: decision-theoretic planning and recursive modeling. Decision-theoretic planning (specifically Partially Observable Markov Decision Processes) provides agents with quantitative models of their corresponding real-world entities’ subjective (and possibly incorrect) perspectives of ground truth in the form of probabilistic beliefs and utility functions. Recursive modeling gives an agent a theory of mind, which is necessary when a person’s (again, possibly incorrect) subjective perspectives are of another person, rather than of just his/her environment. We used PsychSim, a multiagent social-simulation framework combining these two AI frameworks, to build a general parameterized model of human behavior during disaster response, grounding the model in social-psychological theories to ensure social plausibility. We then instantiated that model into alternate ground truths for simulating population response to a series of natural disasters, namely, hurricanes. The simulations generate data in response to socially plausible instruments (e.g., surveys) that serve as input to the Ground Truth program’s designated research teams for them to conduct simulated social science. The simulation also provides a graphical ground truth and a set of outcomes to be used as the gold standard in evaluating the research teams’ inferences.},
keywords = {DTIC, Social Simulation},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan
The Impact of Personalized Feedback on Negotiation Training Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 9, pp. 92–104, US Army Combat Capabilities Development Command–Soldier Center, 2022.
@incollection{johnson_impact_2022,
title = {The Impact of Personalized Feedback on Negotiation Training},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://adlnet.gov/assets/uploads/Vol%209_CompetencyBasedScenarioDesignBook_Complete_Final_021722v2.pdf#page=93},
year = {2022},
date = {2022-02-01},
urldate = {2022-02-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {9},
pages = {92–104},
publisher = {US Army Combat Capabilities Development Command–Soldier Center},
series = {Competency-Based Scenario Design},
abstract = {Intelligent tutoring systems (ITSs) have made great strides in teaching cognitive skills, including math (Koedinger et al., 1997; Koedinger & Corbett, 2005; Koedinger & Corbett, 2006), reading (Mills-Tettey et al., 2009; Wijekumar et al., 2005) and computer literacy (Guo, 2015; Olney et al., 2017). Recent research has begun to extend these techniques to interpersonal skills such as public speaking (Chollet et al., 2014), medical interviews (Pataki, 2012; Stevens, 2006), collaborative problem solving (Graesser et al., 2018) and negotiation (Gratch et al., 2016; Kim et al., 2009). An extensive body of research has documented the benefits of ITSs for cognitive skill development, but relative to this, research on ITSs for interpersonal skills is still in its infancy. This chapter highlights our efforts in adapting ITS techniques to teaching negotiation.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
pages = {661–674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Proceedings Article
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
@inproceedings{gurney_experimental_2022,
title = {An Experimental Method for Studying Complex Choices},
author = {Nikolos Gurney and Tyler King and John H. Miller},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
doi = {10.1007/978-3-031-19679-9_6},
isbn = {978-3-031-19679-9},
year = {2022},
date = {2022-01-01},
booktitle = {HCI International 2022 – Late Breaking Posters},
pages = {39–45},
publisher = {Springer Nature Switzerland},
address = {Cham},
series = {Communications in Computer and Information Science},
abstract = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggests a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic Proceedings Article
In: Kurosu, Masaaki (Ed.): Human-Computer Interaction. User Experience and Behavior, pp. 580–590, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05412-9.
@inproceedings{wang_toward_2022,
title = {Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05412-9_39},
doi = {10.1007/978-3-031-05412-9_39},
isbn = {978-3-031-05412-9},
year = {2022},
date = {2022-01-01},
booktitle = {Human-Computer Interaction. User Experience and Behavior},
pages = {580–590},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal charismatic strategies based on the research on charismatic leaders, which was then used to re-write an existing tutorial on the human circulatory system to express charisma. We then collected voice recordings of the tutorial in both charismatic and non-charismatic voices using actors from a crowd-sourcing platform. In this paper, we present the analysis of the charismatic and non-charismatic voice recordings, and discuss what nonverbal behaviors in speeches contribute to perceived charisma. Results can shed light on the synthesis of charismatic speeches for virtual characters.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Measuring and Predicting Human Trust in Recommendations from an AI Teammate Proceedings Article
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, pp. 22–34, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05643-7.
@inproceedings{gurney_measuring_2022,
title = {Measuring and Predicting Human Trust in Recommendations from an AI Teammate},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05643-7_2},
doi = {10.1007/978-3-031-05643-7_2},
isbn = {978-3-031-05643-7},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in HCI},
pages = {22–34},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Predicting compliance with AI recommendations and knowing when to intervene are critical facets of human-AI teaming. AIs are typically deployed in settings where their abilities to evaluate decision variables far exceed the abilities of their human counterparts. However, even though AIs excel at weighing multiple issues and computing near optimal solutions with speed and accuracy beyond that of any human, they still make mistakes. Thus, perfect compliance may be undesirable. This means, just as individuals must know when to follow the advice of other people, it is critical for them to know when to adopt the recommendations from their AI. Well-calibrated trust is thought to be a fundamental aspect of this type of knowledge. We compare the ability of a common trust inventory and the ability of a behavioral measure of trust to predict compliance and success in a reconnaissance mission. We interpret the experimental results to suggest that the behavioral measure is a better predictor of overall mission compliance and success. We discuss how this measure could possibly be used in compliance interventions and related open questions.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Yunzhe; Gurney, Nikolos; Zhou, Jincheng; Pynadath, David V.; Ustun, Volkan
Route Optimization in Service of a Search and Rescue Artificial Social Intelligence Agent Book Section
In: Gurney, Nikolos; Sukthankar, Gita (Ed.): Computational Theory of Mind for Human-Machine Teams, vol. 13775, pp. 220–228, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-21670-1 978-3-031-21671-8, (Series Title: Lecture Notes in Computer Science).
@incollection{gurney_route_2022,
title = {Route Optimization in Service of a Search and Rescue Artificial Social Intelligence Agent},
author = {Yunzhe Wang and Nikolos Gurney and Jincheng Zhou and David V. Pynadath and Volkan Ustun},
editor = {Nikolos Gurney and Gita Sukthankar},
url = {https://link.springer.com/10.1007/978-3-031-21671-8_14},
doi = {10.1007/978-3-031-21671-8_14},
isbn = {978-3-031-21670-1 978-3-031-21671-8},
year = {2022},
date = {2022-01-01},
urldate = {2023-02-10},
booktitle = {Computational Theory of Mind for Human-Machine Teams},
volume = {13775},
pages = {220–228},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {Cognitive Architecture, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Marsella, Stacy; Ustun, Volkan; Pynadath, David V.
Operationalizing Theories of Theory of Mind: A Survey Book Section
In: Gurney, Nikolos; Sukthankar, Gita (Ed.): Computational Theory of Mind for Human-Machine Teams, vol. 13775, pp. 3–20, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-21670-1 978-3-031-21671-8, (Series Title: Lecture Notes in Computer Science).
@incollection{gurney_operationalizing_2022,
title = {Operationalizing Theories of Theory of Mind: A Survey},
author = {Nikolos Gurney and Stacy Marsella and Volkan Ustun and David V. Pynadath},
editor = {Nikolos Gurney and Gita Sukthankar},
url = {https://link.springer.com/10.1007/978-3-031-21671-8_1},
doi = {10.1007/978-3-031-21671-8_1},
isbn = {978-3-031-21670-1 978-3-031-21671-8},
year = {2022},
date = {2022-01-01},
urldate = {2023-02-10},
booktitle = {Computational Theory of Mind for Human-Machine Teams},
volume = {13775},
pages = {3–20},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {Cognitive Architecture, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2021
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 148–155, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{mell_pandemic_2021,
title = {Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478353},
doi = {10.1145/3472306.3478353},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-26},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {148–155},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Jajodia, Aditya; Karpurapu, Abhilash; Merchant, Chirag
Charisma and Learning: Designing Charismatic Behaviors for Virtual Human Tutors Proceedings Article
In: Roll, Ido; McNamara, Danielle; Sosnovsky, Sergey; Luckin, Rose; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, pp. 372–377, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-78270-2.
@inproceedings{wang_charisma_2021,
title = {Charisma and Learning: Designing Charismatic Behaviors for Virtual Human Tutors},
author = {Ning Wang and Aditya Jajodia and Abhilash Karpurapu and Chirag Merchant},
editor = {Ido Roll and Danielle McNamara and Sergey Sosnovsky and Rose Luckin and Vania Dimitrova},
url = {https://link.springer.com/chapter/10.1007/978-3-030-78270-2_66},
doi = {10.1007/978-3-030-78270-2_66},
isbn = {978-3-030-78270-2},
year = {2021},
date = {2021-01-01},
booktitle = {Artificial Intelligence in Education},
pages = {372–377},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication. Research on charisma on a specific type of leader in a specific type of organization – teachers in the classroom – has indicated the positive influence of a teacher’s charismatic behaviors, often referred to as immediacy behaviors, on student learning. How do we realize such behaviors in a virtual tutor? How do such behaviors impact student learning? In this paper, we discuss the design of a charismatic virtual human tutor. We developed verbal and nonverbal (with the focus on voice) charismatic strategies and realized such strategies through scripted tutorial dialogues and pre-recorded voices. A study with the virtual human tutor has shown an intriguing impact of charismatic behaviors on student learning.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Pynadath, David V.; Wang, Ning; Kamireddy, Sreekar
A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 171–178, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
@inproceedings{pynadath_markovian_2019,
title = {A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction},
author = {David V. Pynadath and Ning Wang and Sreekar Kamireddy},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3351905},
doi = {10.1145/3349537.3351905},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {171–178},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {Trust calibration is critical to the success of human-agent interaction (HAI). However, individual differences are ubiquitous in people’s trust relationships with autonomous systems. To help its heterogeneous human teammates calibrate their trust in it, an agent must first dynamically model them as individuals, rather than communicating with them all in the same manner. It can then generate expectations of its teammates’ behavior and optimize its own communication based on the current state of the trust relationship it has with them. In this work, we examine how an agent can generate accurate expectations given observations of only the teammate’s trust-related behaviors (e.g., did the person follow or ignore its advice?). In addition to this limited input, we also seek a specific output: accurately predicting its human teammate’s future trust behavior (e.g., will the person follow or ignore my next suggestion?). In this investigation, we construct a model capable of generating such expectations using data gathered in a human-subject study of behavior in a simulated human-robot interaction (HRI) scenario. We first analyze the ability of measures from a pre-survey on trust-related traits to accurately predict subsequent trust behaviors. However, as the interaction progresses, this effect is dwarfed by direct experience. We therefore analyze the ability of sequences of prior behavior by the teammate to accurately predict subsequent trust behaviors. Such behavioral sequences have been shown to be indicative of the subjective beliefs of other teammates, and we show here that they have predictive power as well.},
keywords = {MedVR, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
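The Markovian method above predicts a teammate's next trust-related behavior from their recent history of following or ignoring advice. Below is a minimal first-order sketch of that idea with invented behavior sequences (not the study's data): it estimates transition frequencies and predicts the most likely next behavior.

# Illustrative sketch (not the study's model): a first-order Markov chain over
# trust-related behaviors ("follow" vs. "ignore" advice), estimated from
# made-up sequences and used to predict the next behavior.
from collections import Counter, defaultdict

sequences = [
    ["follow", "follow", "ignore", "follow"],
    ["ignore", "ignore", "follow", "follow"],
    ["follow", "follow", "follow", "ignore"],
]

# Count observed transitions prev -> next across all sequences.
counts = defaultdict(Counter)
for seq in sequences:
    for prev, nxt in zip(seq, seq[1:]):
        counts[prev][nxt] += 1

def predict_next(prev_behavior):
    """Return the most likely next behavior and its estimated probability."""
    c = counts[prev_behavior]
    behavior, n = c.most_common(1)[0]
    return behavior, n / sum(c.values())

print(predict_next("follow"))   # e.g. ('follow', 0.67)
print(predict_next("ignore"))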
Zhu, Runhe; Becerik-Gerber, Burcin; Lucas, Gale; Southers, Erroll; Pynadath, David V
Information Requirements for Virtual Environments to Study Human-Building Interactions during Active Shooter Incidents Journal Article
In: Computing in Civil Engineering, pp. 8, 2019.
@article{zhu_information_2019,
title = {Information Requirements for Virtual Environments to Study Human-Building Interactions during Active Shooter Incidents},
author = {Runhe Zhu and Burcin Becerik-Gerber and Gale Lucas and Erroll Southers and David V Pynadath},
url = {https://ascelibrary.org/doi/10.1061/9780784482445.024},
doi = {10.1061/9780784482445.024},
year = {2019},
date = {2019-06-01},
journal = {Computing in Civil Engineering},
pages = {8},
abstract = {Active shooter incidents present an increasing American homeland security threat to public safety and human life. Several municipal law enforcement agencies have released building design guidelines intended to offer increased resilience and resistance to potential attacks. However, these design recommendations mainly focus on terrorist attacks, prioritizing the enhancement of building security, whereas their impact on safety during active shooter incidents, and corresponding human-building interactions (HBIs) that influence the outcomes (response performance), remain unclear. To respond to this research gap, virtual reality, with its ability to manipulate environmental variables and scenarios while providing safe non-invasive environments, could be a promising method to conduct human-subject studies in the context of active shooter incidents. In this paper, we identify the requirements for developing virtual environments that represent active shooter incidents in buildings to study HBIs and their impacts on the response performance. Key components constituting virtual environments were considered and presented. These include: (1) what types of buildings should be modeled in virtual environments; (2) how to select protective building design recommendations for active shooter incidents and model them in virtual environments; (3) what types of adversary and crowd behavior should be modeled; and (4) what types of interactions among participants, buildings, adversaries, and crowds should be included in virtual environments. Findings on the above key components were summarized to provide recommendations for future research directions.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2018
Wang, Ning; Schwartz, David; Lewine, Gabrielle; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy
Addressing Sexist Attitudes on a College Campus through Virtual Role-Play with Digital Doppelgangers Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents - IVA '18, pp. 219–226, ACM Press, Sydney, NSW, Australia, 2018, ISBN: 978-1-4503-6013-5.
@inproceedings{wang_addressing_2018,
title = {Addressing Sexist Attitudes on a College Campus through Virtual Role-Play with Digital Doppelgangers},
author = {Ning Wang and David Schwartz and Gabrielle Lewine and Ari Shapiro and Andrew Feng and Cindy Zhuang},
url = {http://dl.acm.org/citation.cfm?doid=3267851.3267913},
doi = {10.1145/3267851.3267913},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents - IVA '18},
pages = {219–226},
publisher = {ACM Press},
address = {Sydney, NSW, Australia},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Digital doppelgangers possess great potential to serve as powerful models for behavioral change. An emerging technology, the Rapid Avatar Capture and Simulation (RACAS) system, enables low-cost and high-speed scanning of a human user and creation of a digital doppelganger that is a fully animatable virtual 3D model of the user. We designed a virtual role-playing game, DELTA, that implements a powerful cognitive dissonance-based paradigm for attitudinal and behavioral change, and integrated it with digital doppelgangers to influence a human user’s attitude towards sexism on college campuses. In this paper, we discuss the design and evaluation of the RACAS system and the DELTA game-based environment. Results indicate the potential impact of the DELTA game-based environment in creating an immersive virtual experience for attitudinal change.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1495–1503, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
@inproceedings{pynadath_clustering_2018,
title = {Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://dl.acm.org/citation.cfm?id=3237923},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1495–1503},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Trust is critical to the success of human-agent teams, and a critical antecedent to trust is transparency. To best interact with human teammates, an agent must explain itself so that they understand its decision-making process. However, individual differences among human teammates require that the agent dynamically adjust its explanation strategy based on their unobservable subjective beliefs. The agent must therefore recognize its teammates' subjective beliefs relevant to trust-building (e.g., their understanding of the agent's capabilities and process). We leverage a nonparametric method to enable an agent to use its history of prior interactions as a means for recognizing and predicting a new teammate's subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable perceptions. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors' responses to infer the likelihood of possible beliefs, as in collaborative filtering. The results provide insights into the types of beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
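The clustering approach above infers a new teammate's unobservable beliefs from the prior teammates whose behavior most resembles theirs. The sketch below illustrates the nearest-neighbor, collaborative-filtering-style idea with a simple sequence-similarity measure and invented data; it is not the study's dataset or distance metric.

# Illustrative sketch (not the paper's method): estimate the likelihood of an
# unobservable belief from a new teammate's k most behaviorally similar
# predecessors. All sequences and survey responses are invented.
def hamming_similarity(a, b):
    """Fraction of positions where two equal-length behavior sequences agree."""
    return sum(x == y for x, y in zip(a, b)) / len(a)

# Prior teammates: (behavior sequence, surveyed belief "the agent is capable").
prior = [
    (["follow", "follow", "ignore", "follow"], True),
    (["ignore", "ignore", "ignore", "follow"], False),
    (["follow", "follow", "follow", "follow"], True),
    (["ignore", "follow", "ignore", "ignore"], False),
]

def infer_belief(new_sequence, k=3):
    """Estimate P(belief) from the new teammate's k nearest behavioral neighbors."""
    ranked = sorted(prior, key=lambda p: hamming_similarity(new_sequence, p[0]), reverse=True)
    neighbors = ranked[:k]
    return sum(belief for _, belief in neighbors) / k

print(infer_belief(["follow", "ignore", "follow", "follow"]))  # e.g. 0.67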
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Schwartz, David; Goldberg, Stephen L.
An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger Proceedings Article
In: Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE), pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@inproceedings{wang_analysis_2018,
title = {An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and David Schwartz and Stephen L. Goldberg},
url = {http://ceur-ws.org/Vol-2141/paper3.pdf},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE)},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Using a low-cost and high-speed computer graphics and character animation technology, we created digital doppelgangers of students and placed them in a learning-by-explaining task where they interacted with digital doppelgangers of themselves. We investigate the research question of how increasing the similarity of the physical appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual human listener in a learning-by-explaining paradigm. It presents an analysis of how students’ perceptions of the resemblance impact their learning experience and outcomes. The analysis and results offer insight into the promise and limitation of the application of this novel technology to pedagogical agents research.},
keywords = {ARL, DoD, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V; Wang, Ning; Yang, Richard
Simulating Collaborative Learning through Decision-Theoretic Agents Proceedings Article
In: Proceedings of the Assessment and Intervention during Team Tutoring Workshop, CEUR-WS.org, London, UK, 2018.
@inproceedings{pynadath_simulating_2018,
title = {Simulating Collaborative Learning through Decision-Theoretic Agents},
author = {David V Pynadath and Ning Wang and Richard Yang},
url = {http://ceur-ws.org/Vol-2153/paper5.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the Assessment and Intervention during Team Tutoring Workshop},
publisher = {CEUR-WS.org},
address = {London, UK},
abstract = {Simulation for team training has a long history of success in medical care and emergency response. In fields where individuals work together to make decisions and perform actions under extreme time pressure and risk (as in military teams), simulations offer safe and repeatable environments for teams to learn and practice without real-world consequences. In our team-based training simulation, we use intelligent agents to represent individual learners and to autonomously generate behavior while learning to perform a joint task. Our agents are built upon PsychSim, a social-simulation framework that uses decision theory to provide domain-independent, quantitative algorithms for representing and reasoning about uncertainty and conflicting goals. We present a collaborative learning testbed in which two PsychSim agents performed a joint “capture-the-flag” mission in the presence of an enemy agent. The testbed supports a reinforcement-learning capability that enables the agents to revise their decision-theoretic models based on their experiences in performing the target task. We can “train” these agents by having them repeatedly perform the task and refine their models through reinforcement learning. We can then “test” the agents by measuring their performance once their learning has converged to a final policy. Repeating this train-and-test cycle across different parameter settings (e.g., priority of individual vs. team goals) and learning configurations (e.g., train with the same teammate vs. train with different teammates) yields a reusable methodology for characterizing the learning outcomes and measuring the impact of such variations on training effectiveness.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Barnes, Michael J.; Wang, Ning; Chen, Jessie Y. C.
Transparency Communication for Machine Learning in Human-Automation Interaction Book Section
In: Human and Machine Learning, pp. 75–90, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-90402-3 978-3-319-90403-0.
@incollection{pynadath_transparency_2018,
title = {Transparency Communication for Machine Learning in Human-Automation Interaction},
author = {David V. Pynadath and Michael J. Barnes and Ning Wang and Jessie Y. C. Chen},
url = {http://link.springer.com/10.1007/978-3-319-90403-0_5},
doi = {10.1007/978-3-319-90403-0_5},
isbn = {978-3-319-90402-3 978-3-319-90403-0},
year = {2018},
date = {2018-06-01},
booktitle = {Human and Machine Learning},
pages = {75–90},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Technological advances offer the promise of autonomous systems to form human-machine teams that are more capable than their individual members. Understanding the inner workings of the autonomous systems, especially as machine-learning (ML) methods are being widely applied to the design of such systems, has become increasingly challenging for the humans working with them. The “black-box” nature of quantitative ML approaches poses an impediment to people’s situation awareness (SA) of these ML-based systems, often resulting in either disuse of or over-reliance on autonomous systems employing such algorithms. Research in human-automation interaction has shown that transparency communication can improve teammates’ SA, foster the trust relationship, and boost the human-automation team’s performance. In this chapter, we will examine the implications of an agent transparency model for human interactions with ML-based agents using automated explanations. We will discuss the application of a particular ML method, reinforcement learning (RL), in Partially Observable Markov Decision Process (POMDP)-based agents, and the design of explanation algorithms for RL in POMDPs.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction Proceedings Article
In: Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR), Association for the Advancement of Artificial Intelligence, London, UK, 2018.
@inproceedings{pynadath_nearest-neighbor_2018,
title = {A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://aied2018.utscic.edu.au/proceedings/},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR)},
publisher = {Association for the Advancement of Artificial Intelligence},
address = {London, UK},
abstract = {Trust is critical to the success of human-robot interaction (HRI), and one of the critical antecedents to trust is transparency. To best interact with human teammates, a robot must be able to ensure that they understand its decision-making process. Recent work has developed automated explanation methods that can achieve this goal. However, individual differences among human teammates require that the robot dynamically adjust its explanation strategy based on their unobservable subjective beliefs. We therefore need methods by which a robot can recognize its teammates’ subjective beliefs relevant to trust-building (e.g., their understanding of the robot’s capabilities and process). We leverage a nonparametric method, common across many fields of artificial intelligence, to enable a robot to use its history of prior interactions as a means for recognizing and predicting a new teammate’s subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable subjective beliefs. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors to infer the likelihood of possible subjective beliefs, and the results provide insights into the types of subjective beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Merchant, Chirag; Schwartz, David; Goldberg, Stephen L.
Learning by Explaining to a Digital Doppelganger Book Section
In: Intelligent Tutoring Systems, vol. 10858, pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@incollection{wang_learning_2018,
title = {Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and Chirag Merchant and David Schwartz and Stephen L. Goldberg},
url = {http://link.springer.com/10.1007/978-3-319-91464-0_25},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-05-01},
booktitle = {Intelligent Tutoring Systems},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. An emerging computer animation technology makes the creation of digital doppelgangers an accessible reality. This allows researchers in pedagogical agents to explore previously unexplorable research questions, such as how increasing the similarity in appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual listener in a learning-by-explaining paradigm. Results offer insight into the promise and limitation of this novel technology.},
keywords = {ARL, DoD, MedVR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Rovira, Ericka; Barnes, Michael J.; Hill, Susan G.
Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams Book Section
In: Persuasive Technology, vol. 10809, pp. 56–69, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-78977-4 978-3-319-78978-1.
@incollection{wang_is_2018,
title = {Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Ericka Rovira and Michael J. Barnes and Susan G. Hill},
url = {http://link.springer.com/10.1007/978-3-319-78978-1_5},
doi = {10.1007/978-3-319-78978-1_5},
isbn = {978-3-319-78977-4 978-3-319-78978-1},
year = {2018},
date = {2018-04-01},
booktitle = {Persuasive Technology},
volume = {10809},
pages = {56–69},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Trust is critical to the success of human-robot interaction. Research has shown that people will more accurately trust a robot if they have an accurate understanding of its decision-making process. The Partially Observable Markov Decision Process (POMDP) is one such decision-making process, but its quantitative reasoning is typically opaque to people. This lack of transparency is exacerbated when a robot can learn, making its decision making better, but also less predictable. Recent research has shown promise in calibrating human-robot trust by automatically generating explanations of POMDP-based decisions. In this work, we explore factors that can potentially interact with such explanations in influencing human decision-making in human-robot teams. We focus on explanations with quantitative expressions of uncertainty and experiment with common design factors of a robot: its embodiment and its communication strategy in case of an error. Results help us identify valuable properties and dynamics of the human-robot trust relationship.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Barnes, Michael J.; Hill, Susan G.
Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, ACM, Chicago, IL, 2018.
@inproceedings{wang_comparing_2018,
title = {Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate},
author = {Ning Wang and David V. Pynadath and Michael J. Barnes and Susan G. Hill},
url = {http://people.ict.usc.edu/ nwang/PDF/HRI-ERS-2018-Wang.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
publisher = {ACM},
address = {Chicago, IL},
abstract = {Trust is critical to the success of human-robot interaction (HRI). Research has shown that people will more accurately trust a robot if they have a more accurate understanding of its decision-making process. Recent research has shown promise in calibrating human-agent trust by automatically generating explanations of the decision-making process, such as POMDP-based ones. In this paper, we compare two automatically generated explanations, one with quantitative information on uncertainty and one based on sensor observations, and study the impact of such explanations on the perception of a robot in a human-robot team.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Wang, Ning; Pynadath, David V.; Hill, Susan G.; Merchant, Chirag
The Dynamics of Human-Agent Trust with POMDP-Generated Explanations Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents (IVA 2017), Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{wang_dynamics_2017,
title = {The Dynamics of Human-Agent Trust with POMDP-Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill and Chirag Merchant},
url = {https://link.springer.com/chapter/10.1007/978-3-319-67401-8_58},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents (IVA 2017)},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {Partially Observable Markov Decision Processes (POMDPs) enable optimized decision making by robots, agents, and other autonomous systems. This quantitative optimization can also be a limitation in human-agent interaction, as the resulting autonomous behavior, while possibly optimal, is often impenetrable to human teammates, leading to improper trust and, subsequently, disuse or misuse of such systems [1]. Automatically generated explanations of POMDP-based decisions have shown promise in calibrating human-agent trust [3]. However, these “one-size-fits-all” static explanation policies are insufficient to accommodate different communication preferences across people. In this work, we analyze human behavior in a human-robot interaction (HRI) scenario, to find behavioral indicators of trust in the agent’s ability. We evaluate four hypothesized behavioral measures that an agent could potentially use to dynamically infer its teammate’s current trust level. The conclusions drawn can potentially inform the design of intelligent agents that can automatically adapt their explanation policies as they observe the behavioral responses of their human teammates.},
keywords = {ARL, DoD, MedVR, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Schwartz, David; Lewine, Gabrielle; Feng, Andrew Wei-Wen
Virtual Role-Play with Rapid Avatars Book Section
In: Intelligent Virtual Agents, vol. 10498, pp. 463–466, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@incollection{wang_virtual_2017,
title = {Virtual Role-Play with Rapid Avatars},
author = {Ning Wang and Ari Shapiro and David Schwartz and Gabrielle Lewine and Andrew Wei-Wen Feng},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_59},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {10498},
pages = {463–466},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers possess great potential to serve as powerful models for behavioral change. An emerging technology, the Rapid Avatar Capture and Simulation (RACAS), enables low-cost and high-speed scanning of a human user and creation of a digital doppelganger that is a fully animatable virtual 3D model of the user. We designed a virtual role-playing game, DELTA, with digital doppelgangers to influence a human user’s attitude towards sexism on college campuses. In this demonstration, we will showcase the RACAS system and the DELTA game.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
2016
Nazari, Zahra; Gratch, Jonathan
Predictive Models of Malicious Behavior in Human Negotiations Journal Article
In: Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, pp. 855–861, 2016.
@article{nazari_predictive_2016,
title = {Predictive Models of Malicious Behavior in Human Negotiations},
author = {Zahra Nazari and Jonathan Gratch},
url = {http://www.ijcai.org/Proceedings/16/Papers/126.pdf},
year = {2016},
date = {2016-07-01},
journal = {Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence},
pages = {855–861},
abstract = {Human and artificial negotiators must exchange information to find efficient negotiated agreements, but malicious actors could use deception to gain unfair advantage. The misrepresentation game is a game-theoretic formulation of how deceptive actors could gain disproportionate rewards while seeming honest and fair. Previous research proposed a solution to this game, but it required restrictive assumptions that might render it inapplicable to real-world settings. Here we evaluate the formalism against a large corpus of human face-to-face negotiations. We confirm that the model captures how dishonest human negotiators win while seeming fair, even in unstructured negotiations. We also show that deceptive negotiators give off signals of their malicious behavior, providing the opportunity for algorithms to detect and defeat this malicious tactic.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Jalal-Kamali, Ali; Pynadath, David V.
Toward a Bayesian Network Model of Events in International Relations Proceedings Article
In: Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling & Prediction and Behavior Representation in Modeling and Simulation, Springer, Washington D.C., 2016.
@inproceedings{jalal-kamali_toward_2016,
title = {Toward a Bayesian Network Model of Events in International Relations},
author = {Ali Jalal-Kamali and David V. Pynadath},
url = {https://books.google.com/books?id=_HGADAAAQBAJ&pg=PA321&lpg=PA321&dq=Toward+a+Bayesian+network+model+of+events+in+international+relations&source=bl&ots=JBOYm4KCF2&sig=eqmzgrWXwDroEtoLyxZxSjxDIAs&hl=en&sa=X&ved=0ahUKEwiIgoSS8o_PAhUUzGMKHWnaDlEQ6AEILjAC#v=onepage&q=Toward%20a%20Bayesian%20network%20model%20of%20events%20in%20international%20relations&f=false},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling & Prediction and Behavior Representation in Modeling and Simulation},
publisher = {Springer},
address = {Washington D.C.},
abstract = {Formal models of international relations have a long history of exploiting representations and algorithms from artificial intelligence. As more news sources move online, there is an increasing wealth of data that can inform the creation of such models. The Global Database of Events, Language, and Tone (GDELT) extracts events from news articles from around the world, where the events represent actions taken by geopolitical actors, reflecting the actors’ relationships. We can apply existing machine-learning algorithms to automatically construct a Bayesian network that represents the distribution over the actions between actors. Such a network model allows us to analyze the interdependencies among events and generate the relative likelihoods of different events. By examining the accuracy of the learned network over different years and different actor pairs, we are able to identify aspects of international relations from a data-driven approach. We are also able to identify weaknesses in the model that suggest the need for additional domain knowledge.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{gratch_misrepresentation_2016,
title = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
author = {Jonathan Gratch and Zahra Nazari and Emmanuel Johnson},
url = {http://dl.acm.org/citation.cfm?id=2937031},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {728–737},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic – the “fixed-pie lie” – that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Rosoff, Heather; John, Richard S.
Semi-Automated Construction of Decision-Theoretic Models of Human Behavior Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 891–899, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_semi-automated_2016,
title = {Semi-Automated Construction of Decision-Theoretic Models of Human Behavior},
author = {David V. Pynadath and Heather Rosoff and Richard S. John},
url = {http://dl.acm.org/citation.cfm?id=2937055},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {891–899},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Multiagent social simulation provides a powerful mechanism for policy makers to understand the potential outcomes of their decisions before implementing them. However, the value of such simulations depends on the accuracy of their underlying agent models. In this work, we present a method for automatically exploring a space of decision-theoretic models to arrive at a multiagent social simulation that is consistent with human behavior data. We start with a factored Partially Observable Markov Decision Process (POMDP) whose states, actions, and reward capture the questions asked in a survey from a disaster response scenario. Using input from domain experts, we construct a set of hypothesized dependencies that may or may not exist in the transition probability function. We present an algorithm to search through each of these hypotheses, evaluate their accuracy with respect to the data, and choose the models that best reflect the observed behavior, including individual differences. The result is a mechanism for constructing agent models that are grounded in human behavior data, while still being able to support hypothetical reasoning that is the main advantage of multiagent social simulation.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{wang_impact_2016,
title = {The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {997–1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations Proceedings Article
In: 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 109–116, IEEE, New Zealand, 2016.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{wang_trust_2016,
title = {Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7451741},
doi = {10.1109/HRI.2016.7451741},
year = {2016},
date = {2016-03-01},
booktitle = {2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
pages = {109–116},
publisher = {IEEE},
address = {New Zealand},
abstract = {Trust is a critical factor for achieving the full potential of human-robot teams. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain trust when the system is less than 100% reliable. In this work, we leverage existing agent algorithms to provide a domain-independent mechanism for robots to automatically generate such explanations. To measure the explanation mechanism's impact on trust, we collected self-reported survey data and behavioral data in an agent-based online testbed that simulates a human-robot team task. The results demonstrate that the added explanation capability led to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot trust calibration.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Pynadath, David V.
Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events Proceedings Article
In: Proceedings of The 29th International FLAIRS Conference, pp. 44–49, AAAI Press, Key Largo, FL, 2016.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{georgila_towards_2016,
title = {Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events},
author = {Kallirroi Georgila and David V. Pynadath},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12960/12539},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of The 29th International FLAIRS Conference},
pages = {44–49},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Accurate multiagent social simulation requires a computational model of how people incorporate their observations of real-world events into their beliefs about the state of their world. Current methods for creating such agent-based models typically rely on manual input that can be both burdensome and subjective. In this investigation, we instead pursue automated methods that can translate available data into the desired computational models. For this purpose, we use a corpus of real-world events in combination with longitudinal public opinion polls on a variety of opinion issues. We perform two experiments using automated methods taken from the literature. In our first experiment, we train maximum entropy classifiers to model changes in opinion scores as a function of real-world events. We measure and analyze the accuracy of our learned classifiers by comparing the opinion scores they generate against the opinion scores occurring in a held-out subset of our corpus. In our second experiment, we learn Bayesian networks to capture the same function. We then compare the dependency structures induced by the two methods to identify the event features that have the most significant effect on changes in public opinion.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Building Trust in a Human-Robot Team with Automatically Generated Explanations Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation
@inproceedings{wang_building_2015,
title = {Building Trust in a Human-Robot Team with Automatically Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Technological advances offer the promise of robotic systems that work with people to form human-robot teams that are more capable than their individual members. Unfortunately, the increasing capability of such autonomous systems has often failed to increase the capability of the human-robot team. Studies have identified many causes underlying these failures, but one critical aspect of a successful human-machine interaction is trust. When robots are more suited than humans for a certain task, we want the humans to trust the robots to perform that task. When the robots are less suited, we want the humans to appropriately gauge the robots’ ability and have people perform the task manually. Failure to do so results in disuse of robots in the former case and misuse in the latter. Real-world case studies and laboratory experiments show that failures in both cases are common. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies show that explanations offered by an automated system can help maintain trust with the humans in case the system makes an error, indicating that the robot’s communication transparency can be an important factor in earning an appropriate level of trust. To study how robots can communicate their decision-making process to humans, we have designed an agent-based online test-bed that supports virtual simulation of domain-independent human-robot interaction. In the simulation, humans work together with virtual robots as a team. The test-bed allows researchers to conduct online human-subject studies and gain better understanding of how robot communication can improve human-robot team performance by fostering better trust relationships between humans and their robot teammates. In this paper, we describe the details of our design, and illustrate its operation with an example human-robot team reconnaissance task.},
keywords = {ARL, DoD, Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Merchant, Chirag
Toward Acquiring a Human Behavior Model of Competition vs. Cooperation Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation
@inproceedings{pynadath_toward_2015,
title = {Toward Acquiring a Human Behavior Model of Competition vs. Cooperation},
author = {David V. Pynadath and Ning Wang and Chirag Merchant},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {One of the challenges in modeling human behavior is accurately capturing the conditions under which people will behave selfishly or selflessly. Researchers have been unable to craft purely cooperative (or competitive) scenarios without significant numbers of subjects displaying unintended selfish (or selfless) behavior (e.g., Rapoport & Chammah, 1965). In this work, rather than try to further isolate competitive vs. cooperative behavior, we instead construct an experimental setting that deliberately includes both, in a way that fits within an operational simulation model. Using PsychSim, a multiagent social simulation framework with both Theory of Mind and decision theory, we have implemented an online resource allocation game called “Team of Rivals”, where four players seek to defeat a common enemy. The players have individual pools of resources which they can allocate toward that common goal. In addition to their progress toward this common goal, the players also receive individual feedback, in terms of the number of resources they own and have won from the enemy. By giving the players both an explicit cooperative goal and implicit feedback on potential competitive goals, we give them room to behave anywhere on the spectrum between these two extremes. Furthermore, by moving away from the more common two-player laboratory settings (e.g., Prisoner’s Dilemma), we can observe differential behavior across the richer space of possible interpersonal relationships. We discuss the design of the game that allows us to observe and analyze these relationships from human behavior data acquired through this game. We then describe decision-theoretic agents that can simulate hypothesized variations on human behavior. Finally, we present results of a preliminary playtest of the testbed and discuss the gathered data.},
keywords = {MedVR, Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{gratch_appraisal_2015,
title = {The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics},
author = {Jonathan Gratch and Lin Cheng and Stacy Marsella},
url = {http://ict.usc.edu/pubs/The%20Appraisal%20Equivalence%20Hypothesis-Verifying%20the%20domain-independence%20of%20a%20computational%20model%20of%20emotion%20dynamics.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Appraisal theory is the most influential theory within affective computing, and serves as the basis for several computational models of emotion. The theory makes strong claims of domain-independence: seemingly different situations, both within and across domains are claimed to produce the identical emotional responses if and only if they are appraised the same way. This article tests this claim, and the predictions of a computational model that embodies it, in two very different interactive games. The results extend prior empirical evidence for appraisal theory to situations where emotions unfold and change over time.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Alfonso, Bexy; Pynadath, David V.; Lhommet, Margot; Marsella, Stacy
Emotional Perception for Updating Agents’ Beliefs Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{alfonso_emotional_2015,
title = {Emotional Perception for Updating Agents’ Beliefs},
author = {Bexy Alfonso and David V. Pynadath and Margot Lhommet and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Emotional%20Perception%20for%20Updating%20Agents%e2%80%99%20Beliefs.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {The relative influence of perception and situation in emotional judgments has been extensively debated in psychology. A main issue in this debate concerns how these sources of information are integrated. This work proposes a method able to make probabilistic predictions of appraisals of other agents, using mental models of those agents. From these appraisal predictions, predictions about another agent’s expressions are made, integrated with observations of the other agent’s ambiguous emotional expressions using Bayesian techniques, resulting in updates to the agent’s mental models. Our method is inspired by psychological work on human interpretation of emotional expressions. We demonstrate how these appraisals of others’ emotions and observations of their expressions can be an integral part of an agent capable of Theory of Mind reasoning.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Morency, Louis-Philippe; DeVault, David; Hartholt, Arno; Fast, Edward; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert
A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews Proceedings Article
In: Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on, pp. 787–789, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{stratou_demonstration_2015,
title = {A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews},
author = {Giota Stratou and Louis-Philippe Morency and David DeVault and Arno Hartholt and Edward Fast and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert Rizzo},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7344661},
doi = {10.1109/ACII.2015.7344661},
year = {2015},
date = {2015-09-01},
booktitle = {Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on},
pages = {787–789},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. With this demo, we focus our attention on the perception part of the system, a multimodal framework which captures and analyzes user state behavior for both behavioral understanding and interactional purposes. We will demonstrate real-time user state sensing as a part of the SimSensei architecture and discuss how this technology enabled automatic analysis of behaviors related to psychological distress.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{feng_platform_2015,
title = {A Platform for Building Mobile Virtual Humans},
author = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
url = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
doi = {10.1007/978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
pages = {310–319},
publisher = {Springer},
address = {Delft, Netherlands},
abstract = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
When the going gets tough: Grit predicts costly perseverance Journal Article
In: Journal of Research in Personality, vol. 59, pp. 15–22, 2015, ISSN: 0092-6566.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@article{lucas_when_2015,
title = {When the going gets tough: Grit predicts costly perseverance},
author = {Gale M. Lucas and Jonathan Gratch and Lin Cheng and Stacy Marsella},
url = {http://ict.usc.edu/pubs/When%20the%20going%20gets%20tough-Grit%20predicts%20costly%20perseverance.pdf},
doi = {10.1016/j.jrp.2015.08.004},
issn = {0092-6566},
year = {2015},
date = {2015-08-01},
journal = {Journal of Research in Personality},
volume = {59},
pages = {15–22},
abstract = {In this research, we investigate how grittier individuals might incur some costs by persisting when they could move on. Grittier participants were found to be less willing to give up when failing even though they were likely to incur a cost for their persistence. First, grittier participants are more willing to risk failing to complete a task by persisting on individual items. Second, when they are losing, they expend more effort and persist longer in a game rather than quit. Gritty participants have more positive emotions and expectations toward the task, which mediates the relationship between grit and staying to persist when they are losing. Results show gritty individuals are more willing to risk suffering monetary loss to persist.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}