Publications
2021
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Visualization of social emotional appraisal process of an agent Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pp. 1–2, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-021-3.
@inproceedings{sato_visualization_2021,
title = {Visualization of social emotional appraisal process of an agent},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9666329/},
doi = {10.1109/ACIIW52867.2021.9666329},
isbn = {978-1-66540-021-3},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)},
pages = {1--2},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{johnson_comparing_2021,
title = {Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478354},
doi = {10.1145/3472306.3478354},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {139--144},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{johnson_using_2021,
title = {Using Intelligent Agents to Examine Gender in Negotiations},
author = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
url = {https://dl.acm.org/doi/10.1145/3472306.3478348},
doi = {10.1145/3472306.3478348},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {90--97},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{terada_effect_2021,
title = {Effect of politeness strategies in dialogue on negotiation outcomes},
author = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478336},
doi = {10.1145/3472306.3478336},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {195--202},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale
Rapport Between Humans and Socially Interactive Agents Incollection
In: Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.): The Handbook on Socially Interactive Agents, pp. 433–462, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
@incollection{gratch_rapport_2021,
title = {Rapport Between Humans and Socially Interactive Agents},
author = {Jonathan Gratch and Gale Lucas},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/10.1145/3477322.3477335},
doi = {10.1145/3477322.3477335},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {The Handbook on Socially Interactive Agents},
pages = {433--462},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
@inproceedings{chawla_towards_2021,
title = {Towards Emotion-Aware Agents For Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9597427/},
doi = {10.1109/ACII52823.2021.9597427},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-27},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1--8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.)
The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition Book
1, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
@book{lugrin_handbook_2021,
title = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/book/10.1145/3477322},
doi = {10.1145/3477322},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
@inproceedings{yin_contrastive_2021,
title = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
author = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9597453/},
doi = {10.1109/ACII52823.2021.9597453},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1--8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Horstmann, Aike C.; Gratch, Jonathan; Krämer, Nicole C.
I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person Journal Article
In: International Journal of Human-Computer Studies, pp. 102683, 2021, ISSN: 1071-5819.
@article{horstmann_i_2021,
title = {I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person},
author = {Aike C. Horstmann and Jonathan Gratch and Nicole C. Krämer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1071581921001014},
doi = {10.1016/j.ijhcs.2021.102683},
issn = {1071-5819},
year = {2021},
date = {2021-06-01},
urldate = {2021-06-18},
journal = {International Journal of Human-Computer Studies},
pages = {102683},
abstract = {Previous research focused on differences between interacting with a person-controlled avatar and a computer-controlled virtual agent. This study however examines an aspiring form of technology called agent representative which constitutes a mix of the former two interaction partner types since it is a computer agent which was previously instructed by a person to take over a task on the person’s behalf. In an experimental lab study with a 2 x 3 between-subjects-design (N = 195), people believed to study together either with an agent representative, avatar, or virtual agent. The interaction partner was described to either possess high or low expertise, while always giving negative feedback regarding the participant’s performance. Results show small but interesting differences regarding the type of agency. People attributed the most agency and blame to the person(s) behind the software and reported the most negative affect when interacting with an avatar, which was less the case for a person’s agent representative and the least for a virtual agent. Level of expertise had no significant effect and other evaluation measures were not affected.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 2589-0042.
@article{de_melo_heuristic_2021,
title = {Heuristic thinking and altruism toward machines in people impacted by COVID-19},
author = {Celso M. de Melo and Jonathan Gratch and Frank Krueger},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
doi = {10.1016/j.isci.2021.102228},
issn = {2589-0042},
year = {2021},
date = {2021-03-01},
urldate = {2021-04-14},
journal = {iScience},
volume = {24},
number = {3},
pages = {102228},
abstract = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
@article{mell_expert-model_2021,
title = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s12193-021-00368-w},
doi = {10.1007/s12193-021-00368-w},
issn = {1783-7677, 1783-8738},
year = {2021},
date = {2021-03-01},
urldate = {2021-04-15},
journal = {J Multimodal User Interfaces},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2021, ISBN: 978-981-15-8394-0, 978-981-15-8395-7, (Series Title: Lecture Notes in Electrical Engineering).
@incollection{dharo_towards_2021,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
editor = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
doi = {10.1007/978-981-15-8395-7_11},
isbn = {978-981-15-8394-0, 978-981-15-8395-7},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
note = {Series Title: Lecture Notes in Electrical Engineering},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English Bilingual Chatbot Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 41–50, Springer Singapore, Singapore, 2021, ISBN: 978-981-15-8394-0, 978-981-15-8395-7, (Series Title: Lecture Notes in Electrical Engineering).
@incollection{dharo_masheli_2021,
title = {Masheli: A Choctaw-English Bilingual Chatbot},
author = {Jacqueline Brixey and David Traum},
editor = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_4},
doi = {10.1007/978-981-15-8395-7_4},
isbn = {978-981-15-8394-0, 978-981-15-8395-7},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {41--50},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
note = {Series Title: Lecture Notes in Electrical Engineering},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lee, Minha; Lucas, Gale; Gratch, Jonathan
Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
@article{lee_comparing_2021,
title = {Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games},
author = {Minha Lee and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s12193-020-00356-6},
doi = {10.1007/s12193-020-00356-6},
issn = {1783-7677, 1783-8738},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
journal = {J Multimodal User Interfaces},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In a comparative manner, we observed how perceived minds of agents shape people’s behavior in the dictator game, ultimatum game, and negotiation against artificial agents. To do so, we varied agents’ minds on two dimensions of the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude) via descriptions and dialogs. In our first study, agents with emotional capacity garnered more allocations in the dictator game, but in the ultimatum game, agents’ described agency and affective capacity, both led to greater offers. In the second study on negotiation, agents ascribed with low-agency traits earned more points than those with high-agency traits, though the negotiation tactic was the same for all agents. Although patiency did not impact game points, participants sent more happy and surprise emojis and emotionally valenced messages to agents that demonstrated emotional capacity during negotiations. Further, our exploratory analyses indicate that people related only to agents with perceived affective aptitude across all games. Both perceived agency and affective capacity contributed to moral standing after dictator and ultimatum games. But after negotiations, only agents with perceived affective capacity were granted moral standing. Manipulating mind dimensions of machines has differing effects on how people react to them in dictator and ultimatum games, compared to a more complex economic exchange like negotiation. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations, in contrast with simple economic games.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The Promise and Peril of Automated Negotiators Journal Article
In: Negotiation Journal, vol. 37, no. 1, pp. 13–34, 2021, ISSN: 0748-4526, 1571-9979.
@article{gratch_promise_2021,
title = {The Promise and Peril of Automated Negotiators},
author = {Jonathan Gratch},
url = {https://onlinelibrary.wiley.com/doi/10.1111/nejo.12348},
doi = {10.1111/nejo.12348},
issn = {0748-4526, 1571-9979},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-14},
journal = {Negotiation Journal},
volume = {37},
number = {1},
pages = {13--34},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The field of Affective Computing: An interdisciplinary perspective Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 36, no. 1, pp. 13, 2021.
@article{gratch_field_2021,
title = {The field of Affective Computing: An interdisciplinary perspective},
author = {Jonathan Gratch},
url = {https://people.ict.usc.edu/~gratch/CSCI534/Readings/Gratch%20-%20The%20field%20of%20affective%20computing.pdf},
year = {2021},
date = {2021-01-01},
journal = {Transactions of the Japanese Society for Artificial Intelligence},
volume = {36},
number = {1},
pages = {13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Risk of Injury in Moral Dilemmas With Autonomous Vehicles Journal Article
In: Front. Robot. AI, vol. 7, pp. 572529, 2021, ISSN: 2296-9144.
@article{de_melo_risk_2021,
title = {Risk of Injury in Moral Dilemmas With Autonomous Vehicles},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {https://www.frontiersin.org/articles/10.3389/frobt.2020.572529/full},
doi = {10.3389/frobt.2020.572529},
issn = {2296-9144},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-14},
journal = {Front. Robot. AI},
volume = {7},
pages = {572529},
abstract = {As autonomous machines, such as automated vehicles (AVs) and robots, become pervasive in society, they will inevitably face moral dilemmas where they must make decisions that risk injuring humans. However, prior research has framed these dilemmas in starkly simple terms, i.e., framing decisions as life and death and neglecting the influence of risk of injury to the involved parties on the outcome. Here, we focus on this gap and present experimental work that systematically studies the effect of risk of injury on the decisions people make in these dilemmas. In four experiments, participants were asked to program their AVs to either save five pedestrians, which we refer to as the utilitarian choice, or save the driver, which we refer to as the nonutilitarian choice. The results indicate that most participants made the utilitarian choice but that this choice was moderated in important ways by perceived risk to the driver and risk to the pedestrians. As a second contribution, we demonstrate the value of formulating AV moral dilemmas in a game-theoretic framework that considers the possible influence of others’ behavior. In the fourth experiment, we show that participants were more (less) likely to make the utilitarian choice, the more utilitarian (nonutilitarian) other drivers behaved; furthermore, unlike the game-theoretic prediction that decision-makers inevitably converge to nonutilitarianism, we found significant evidence of utilitarianism. We discuss theoretical implications for our understanding of human decision-making in moral dilemmas and practical guidelines for the design of autonomous machines that solve these dilemmas while, at the same time, being likely to be adopted in practice.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Inproceedings
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
@inproceedings{kawano_dialogue_2021,
title = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
author = {Seiya Kawano and Koichiro Yoshino and David Traum and Satoshi Nakamura},
url = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
doi = {10.21437/RobotDial.2021-4},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
pages = {21--29},
publisher = {ISCA},
abstract = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Inproceedings
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{hartholt_introducing_2020,
title = {Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
author = {Arno Hartholt and Adam Reilly and Ed Fast and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3383652.3423880},
doi = {10.1145/3383652.3423880},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Varied Magnitude Favor Exchange in Human-Agent Negotiation Inproceedings
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{mell_varied_2020,
title = {Varied Magnitude Favor Exchange in Human-Agent Negotiation},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3383652.3423866},
doi = {10.1145/3383652.3423866},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Agents that interact with humans in complex, social tasks need the ability to comprehend as well as employ common social strategies. In negotiation, there is ample evidence of such techniques being used efficaciously in human interchanges. In this work, we demonstrate a new design for socially-aware agents that employ one such technique—favor exchange—in order to gain value when playing against humans. In an online study of a robust, simulated social negotiation task, we show that these agents are effective against real human participants. In particular, we show that agents that ask for favors during the course of a repeated set of negotiations are more successful than those that do not. Additionally, previous work has demonstrated that humans can detect when agents betray them by failing to return favors that were previously promised. By contrast, this work indicates that these betrayal techniques may go largely undetected in complex scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alavi, Seyed Hossein; Leuski, Anton; Traum, David
Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net? Inproceedings
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 735–742, European Language Resources Association, Marseille, France, 2020.
@inproceedings{alavi_which_2020,
title = {Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net?},
author = {Seyed Hossein Alavi and Anton Leuski and David Traum},
url = {https://www.aclweb.org/anthology/2020.lrec-1.92/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {735--742},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We compare two models for corpus-based selection of dialogue responses: one based on cross-language relevance and one based on a cross-language LSTM model. Each model is tested on multiple corpora, collected from two different types of dialogue source material. Results show that while the LSTM model performs adequately on a very large corpus (millions of utterances), its performance is dominated by the cross-language relevance model for a more moderate-sized corpus (tens of thousands of utterances).},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Stefanov, Kalin; Gratch, Jonathan
Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma Inproceedings
In: Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG), pp. 8, IEEE, Buenos Aires, Argentina, 2020.
@inproceedings{lei_emotion_2020,
title = {Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma},
author = {Su Lei and Kalin Stefanov and Jonathan Gratch},
url = {https://www.computer.org/csdl/proceedings-article/fg/2020/307900a770/1kecIWT5wmA},
doi = {10.1109/FG47880.2020.00123},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)},
pages = {8},
publisher = {IEEE},
address = {Buenos Aires, Argentina},
abstract = {An extensive body of research has examined how specific emotional expressions shape social perceptions and social decisions, yet recent scholarship in emotion research has raised questions about the validity of emotion as a construct. In this article, we contrast the value of measuring emotional expressions with the more general construct of expressivity (in the sense of conveying a thought or emotion through any nonverbal behavior) and develop models that can automatically extract perceived expressivity from videos. Although less extensive, a solid body of research has shown expressivity to be an important element when studying interpersonal perception, particularly in psychiatric contexts. Here we examine the role expressivity plays in predicting social perceptions and decisions in the context of a social dilemma. We show that perceivers use more than facial expressions when making judgments of expressivity and see these expressions as conveying thoughts as well as emotions (although facial expressions and emotional attributions explain most of the variance in these judgments). We next show that expressivity can be predicted with high accuracy using Lasso and random forests. Our analysis shows that features related to motion dynamics are particularly important for modeling these judgments. We also show that learned models of expressivity have value in recognizing important aspects of a social situation. First, we revisit a previously published finding which showed that smile intensity was associated with the unexpectedness of outcomes in social dilemmas; instead, we show that expressivity is a better predictor (and explanation) of this finding. Second, we provide preliminary evidence that expressivity is useful for identifying “moments of interest” in a video sequence.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Inproceedings
In: Proceedings of the 2020 CHI Conference of Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma},
author = {Sharon Mozgai and Arno Hartholt and Andrew Leeds and Albert ‘Skip’ Rizzo},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference of Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first-step in defining a VRET domain transfer methodology.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Inproceedings
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3, 978-1-5106-3604-0.
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3, 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bellas, Alexandria; Perrin, Stefawn; Malone, Brandon; Rogers, Kaytlin; Lucas, Gale; Phillips, Elizabeth; Tossell, Chad; de Visser, Ewart
Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams Inproceedings
In: Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS), pp. 160–163, IEEE, Charlottesville, VA, USA, 2020, ISBN: 978-1-72817-145-6.
@inproceedings{bellas_rapport_2020,
title = {Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams},
author = {Alexandria Bellas and Stefawn Perrin and Brandon Malone and Kaytlin Rogers and Gale Lucas and Elizabeth Phillips and Chad Tossell and Ewart de Visser},
url = {https://ieeexplore.ieee.org/document/9106643/},
doi = {10.1109/SIEDS49339.2020.9106643},
isbn = {978-1-72817-145-6},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS)},
pages = {160--163},
publisher = {IEEE},
address = {Charlottesville, VA, USA},
abstract = {Conflicts may arise at any time during military debriefing meetings, especially in high intensity deployed settings. When such conflicts arise, it takes time to get everyone back into a receptive state of mind so that they engage in reflective discussion rather than unproductive arguing. It has been proposed by some that the use of social robots equipped with social abilities such as emotion regulation through rapport building may help to deescalate these situations to facilitate critical operational decisions. However, in military settings, the same AI agent used in the pre-brief of a mission may not be the same one used in the debrief. The purpose of this study was to determine whether a brief rapport-building session with a social robot could create a connection between a human and a robot agent, and whether consistency in the embodiment of the robot agent was necessary for maintaining this connection once formed. We report the results of a pilot study conducted at the United States Air Force Academy which simulated a military mission (i.e., Gravity and Strike). Participants’ connection with the agent, sense of trust, and overall likeability revealed that early rapport building can be beneficial for military missions.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Inproceedings
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari Italy, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118--119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Inproceedings
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1--3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach, that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multi-media content via smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in-development.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Lerner, Itamar; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans Journal Article
In: Frontiers in Neuroscience, vol. 13, pp. 1416, 2020, ISSN: 1662-453X.
@article{pilly_one-shot_2020,
title = {One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Itamar Lerner and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.frontiersin.org/article/10.3389/fnins.2019.01416/full},
doi = {10.3389/fnins.2019.01416},
issn = {1662-453X},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Neuroscience},
volume = {13},
pages = {1416},
abstract = {Targeted memory reactivation (TMR) during slow-wave oscillations (SWOs) in sleep has been demonstrated with sensory cues to achieve about 5–12% improvement in post-nap memory performance on simple laboratory tasks. But prior work has not yet addressed the one-shot aspect of episodic memory acquisition, or dealt with the presence of interference from ambient environmental cues in real-world settings. Further, TMR with sensory cues may not be scalable to the multitude of experiences over one’s lifetime. We designed a novel non-invasive non-sensory paradigm that tags one-shot experiences of minute-long naturalistic episodes in immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). In particular, we demonstrated that these STAMPs can be reapplied as brief pulses during SWOs in sleep to achieve about 10–20% improvement in the metamemory of targeted episodes compared to the control episodes at 48 hours after initial viewing. We found that STAMPs can not only facilitate but also impair metamemory for the targeted episodes based on an interaction between presleep metamemory and the number of STAMP applications during sleep. Overnight metamemory improvements were mediated by spectral power increases following the offset of STAMPs in the slow-spindle band (8–12 Hz) for left temporal areas in the scalp electroencephalography (EEG) during sleep. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zhao, Sicheng; Wang, Shangfei; Soleymani, Mohammad; Joshi, Dhiraj; Ji, Qiang
Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey Journal Article
In: ACM Transactions on Multimedia Computing, Communications, and Applications, vol. 15, no. 3s, pp. 1–32, 2020, ISSN: 1551-6857, 1551-6865.
@article{zhao_affective_2020,
title = {Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey},
author = {Sicheng Zhao and Shangfei Wang and Mohammad Soleymani and Dhiraj Joshi and Qiang Ji},
url = {https://dl.acm.org/doi/10.1145/3363560},
doi = {10.1145/3363560},
issn = {1551-6857, 1551-6865},
year = {2020},
date = {2020-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications, and Applications},
volume = {15},
number = {3s},
pages = {1--32},
abstract = {The wide popularity of digital photography and social networks has generated a rapidly growing volume of multimedia data (i.e., images, music, and videos), resulting in a great demand for managing, retrieving, and understanding these data. Affective computing (AC) of these data can help to understand human behaviors and enable wide applications. In this article, we survey the state-of-the-art AC technologies comprehensively for large-scale heterogeneous multimedia data. We begin this survey by introducing the typical emotion representation models from psychology that are widely employed in AC. We briefly describe the available datasets for evaluating AC algorithms. We then summarize and compare the representative methods on AC of different multimedia types, i.e., images, music, videos, and multimodal data, with the focus on both handcrafted features-based methods and deep learning methods. Finally, we discuss some challenges and future directions for multimedia affective computing.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
de Gennaro, Mauro; Krumhuber, Eva G.; Lucas, Gale
Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood Journal Article
In: Frontiers in Psychology, vol. 10, pp. 3061, 2020, ISSN: 1664-1078.
@article{de_gennaro_effectiveness_2020,
title = {Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood},
author = {Mauro de Gennaro and Eva G. Krumhuber and Gale Lucas},
url = {https://www.frontiersin.org/article/10.3389/fpsyg.2019.03061/full},
doi = {10.3389/fpsyg.2019.03061},
issn = {1664-1078},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Psychology},
volume = {10},
pages = {3061},
abstract = {From past research it is well known that social exclusion has detrimental consequences for mental health. To deal with these adverse effects, socially excluded individuals frequently turn to other humans for emotional support. While chatbots can elicit social and emotional responses on the part of the human interlocutor, their effectiveness in the context of social exclusion has not been investigated. In the present study, we examined whether an empathic chatbot can serve as a buffer against the adverse effects of social ostracism. After experiencing exclusion on social media, participants were randomly assigned to either talk with an empathetic chatbot about it (e.g., “I’m sorry that this happened to you”) or a control condition where their responses were merely acknowledged (e.g., “Thank you for your feedback”). Replicating previous research, results revealed that experiences of social exclusion dampened the mood of participants. Interacting with an empathetic chatbot, however, appeared to have a mitigating impact. In particular, participants in the chatbot intervention condition reported higher mood than those in the control condition. Theoretical, methodological, and practical implications, as well as directions for future research are discussed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2019
Rosenbloom, Paul S.; Joshi, Himanshu; Ustun, Volkan
(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML Inproceedings
In: Proceedings of the 7th Annual Conference on Advances in Cognitive Systems, pp. 113–131, Cognitive Systems Foundation, Cambridge, MA, 2019.
@inproceedings{rosenbloom_subsymbolic_2019,
title = {(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML},
author = {Paul S. Rosenbloom and Himanshu Joshi and Volkan Ustun},
url = {https://drive.google.com/file/d/1Ynp75A048Mfuh7e3kf_V7hs5kFD7uHsT/view},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 7th Annual Conference on Advances in Cognitive Systems},
pages = {113--131},
publisher = {Cognitive Systems Foundation},
address = {Cambridge, MA},
abstract = {The traditional symbolic versus subsymbolic dichotomy can be decomposed into three more basic dichotomies, to yield a 3D (2×2×2) space in which symbolic/statistical and neural/ML approaches to intelligence appear in opposite corners. Filling in all eight resulting cells then yields a map that spans a number of standard AI approaches plus a few that may be less familiar. Based on this map, four hypotheses are articulated, explored, and evaluated concerning its relevance to both a deeper understanding of the field of AI as a whole and the general capabilities required in complete AI/cognitive systems.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Inproceedings
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308--3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Srinivasan, Balaji Vasan; Chhaya, Niyati
Generating Formality-Tuned Summaries Using Input-Dependent Rewards Inproceedings
In: Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pp. 833–842, Association for Computational Linguistics, Hong Kong, China, 2019.
@inproceedings{chawla_generating_2019,
title = {Generating Formality-Tuned Summaries Using Input-Dependent Rewards},
author = {Kushal Chawla and Balaji Vasan Srinivasan and Niyati Chhaya},
url = {https://www.aclweb.org/anthology/K19-1078},
doi = {10.18653/v1/K19-1078},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)},
pages = {833--842},
publisher = {Association for Computational Linguistics},
address = {Hong Kong, China},
abstract = {Abstractive text summarization aims at generating human-like summaries by understanding and paraphrasing the given input content. Recent efforts based on sequence-to-sequence networks only allow the generation of a single summary. However, it is often desirable to accommodate the psycho-linguistic preferences of the intended audience while generating the summaries. In this work, we present a reinforcement learning based approach to generate formality-tailored summaries for an input article. Our novel input-dependent reward function aids in training the model with stylistic feedback on sampled and ground-truth summaries together. Once trained, the same model can generate formal and informal summary variants. Our automated and qualitative evaluations show the viability of the proposed framework.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Inproceedings
In: Proceedings of the 2019 International Conference on Multimodal Interaction - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction - ICMI '19},
pages = {59--68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora achieving r = 0.66. However, the cross-corpus evaluation demonstrated that nonverbal behavior can outperform language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231--245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of 1000’s of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects’ research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Haring, Kerstin S.; Tobias, Jessica; Waligora, Justin; Phillips, Elizabeth; Tenhundfeld, Nathan L.; Lucas, Gale; Visser, Ewart J.; Gratch, Jonathan; Tossell, Chad
Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing Inproceedings
In: Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), IEEE, New Delhi, India, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{haring_conflict_2019,
title = {Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing},
author = {Kerstin S. Haring and Jessica Tobias and Justin Waligora and Elizabeth Phillips and Nathan L. Tenhundfeld and Gale Lucas and Ewart J. Visser and Jonathan Gratch and Chad Tossell},
url = {https://ieeexplore.ieee.org/abstract/document/8956414},
doi = {10.1109/RO-MAN46459.2019.8956414},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
publisher = {IEEE},
address = {New Delhi, India},
abstract = {Socially intelligent artificial agents and robots are anticipated to become ubiquitous in home, work, and military environments. With the addition of such agents to human teams it is crucial to evaluate their role in the planning, decision making, and conflict mediation processes. We conducted a study to evaluate the utility of a virtual agent that provided mission planning support in a three-person human team during a military strategic mission planning scenario. The team consisted of a human team lead who made the final decisions and three supporting roles, two humans and the artificial agent. The mission outcome was experimentally designed to fail and introduced a conflict between the human team members and the leader. This conflict was mediated by the artificial agent during the debriefing process through discuss or debate and open communication strategies of conflict resolution [1]. Our results showed that our teams experienced conflict. The teams also responded socially to the virtual agent, although they did not find the agent beneficial to the mediation process. Finally, teams collaborated well together and perceived task proficiency increased for team leaders. Socially intelligent agents show potential for conflict mediation, but need careful design and implementation to improve team processes and collaboration.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tavabi, Leili; Stefanov, Kalin; Gilani, Setareh Nasihati; Traum, David; Soleymani, Mohammad
Multimodal Learning for Identifying Opportunities for Empathetic Responses Inproceedings
In: Proceedings of the 2019 International Conference on Multimodal Interaction, pp. 95–104, ACM, Suzhou China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tavabi_multimodal_2019,
title = {Multimodal Learning for Identifying Opportunities for Empathetic Responses},
author = {Leili Tavabi and Kalin Stefanov and Setareh Nasihati Gilani and David Traum and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3340555.3353750},
doi = {10.1145/3340555.3353750},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction},
pages = {95--104},
publisher = {ACM},
address = {Suzhou China},
abstract = {Embodied interactive agents possessing emotional intelligence and empathy can create natural and engaging social interactions. Providing appropriate responses by interactive virtual agents requires the ability to perceive users’ emotional states. In this paper, we study and analyze behavioral cues that indicate an opportunity to provide an empathetic response. Emotional tone in language in addition to facial expressions are strong indicators of dramatic sentiment in conversation that warrant an empathetic response. To automatically recognize such instances, we develop a multimodal deep neural network for identifying opportunities when the agent should express positive or negative empathetic responses. We train and evaluate our model using audio, video and language from human-agent interactions in a wizard-of-Oz setting, using the wizard’s empathetic responses and annotations collected on Amazon Mechanical Turk as ground-truth labels. Our model outperforms a text-based baseline achieving F1-score of 0.71 on a three-class classification. We further investigate the results and evaluate the capability of such a model to be deployed for real-world human-agent interactions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ringeval, Fabien; Messner, Eva-Maria; Song, Siyang; Liu, Shuo; Zhao, Ziping; Mallol-Ragolta, Adria; Ren, Zhao; Soleymani, Mohammad; Pantic, Maja; Schuller, Björn; Valstar, Michel; Cummins, Nicholas; Cowie, Roddy; Tavabi, Leili; Schmitt, Maximilian; Alisamir, Sina; Amiriparian, Shahin
AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition Inproceedings
In: Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19, pp. 3–12, ACM Press, Nice, France, 2019, ISBN: 978-1-4503-6913-8.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ringeval_avec_2019,
title = {AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition},
author = {Fabien Ringeval and Eva-Maria Messner and Siyang Song and Shuo Liu and Ziping Zhao and Adria Mallol-Ragolta and Zhao Ren and Mohammad Soleymani and Maja Pantic and Björn Schuller and Michel Valstar and Nicholas Cummins and Roddy Cowie and Leili Tavabi and Maximilian Schmitt and Sina Alisamir and Shahin Amiriparian},
url = {http://dl.acm.org/citation.cfm?doid=3347320.3357688},
doi = {10.1145/3347320.3357688},
isbn = {978-1-4503-6913-8},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19},
pages = {3--12},
publisher = {ACM Press},
address = {Nice, France},
abstract = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2019) 'State-of-Mind, Detecting Depression with AI, and Cross-cultural Affect Recognition' is the ninth competition event aimed at the comparison of multimedia processing and machine learning methods for automatic audiovisual health and emotion analysis, with all participants competing strictly under the same conditions. The goal of the Challenge is to provide a common benchmark test set for multimodal information processing and to bring together the health and emotion recognition communities, as well as the audiovisual processing communities, to compare the relative merits of various approaches to health and emotion recognition from real-life data. This paper presents the major novelties introduced this year, the challenge guidelines, the data used, and the performance of the baseline systems on the three proposed tasks: state-of-mind recognition, depression assessment with AI, and cross-cultural affect sensing, respectively.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khashe, Saba; Lucas, Gale; Becerik-Gerber, Burcin; Gratch, Jonathan
Establishing Social Dialog between Buildings and Their Users Journal Article
In: International Journal of Human–Computer Interaction, vol. 35, no. 17, pp. 1545–1556, 2019, ISSN: 1044-7318, 1532-7590.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{khashe_establishing_2019,
title = {Establishing Social Dialog between Buildings and Their Users},
author = {Saba Khashe and Gale Lucas and Burcin Becerik-Gerber and Jonathan Gratch},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2018.1555346},
doi = {10.1080/10447318.2018.1555346},
issn = {1044-7318, 1532-7590},
year = {2019},
date = {2019-10-01},
journal = {International Journal of Human–Computer Interaction},
volume = {35},
number = {17},
pages = {1545--1556},
abstract = {Behavioral intervention strategies have yet to become successful in the development of initiatives to foster pro-environmental behaviors in buildings. In this paper, we explored the potentials of increasing the effectiveness of requests aiming to promote pro-environmental behaviors by engaging users in a social dialog, given the effects of two possible personas that are more related to the buildings (i.e., building vs. building manager). We tested our hypotheses and evaluated our findings in virtual and physical environments and found similar effects in both environments. Our results showed that social dialog involvement persuaded respondents to perform more pro-environmental actions. However, these effects were significant when the requests were delivered by an agent representing the building. In addition, these strategies were not equally effective across all types of people and their effects varied for people with different characteristics. Our findings provide useful design choices for persuasive technologies aiming to promote pro-environmental behaviors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Inproceedings
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205--207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses the Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate and head orientation. The system provides two characters, with three conversational modes each. Ported from an existing desktop application, the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Yanov, Volodymyr; Traum, David; Georgila, Kallirroi
A Wizard of Oz Data Collection Framework for Internet of Things Dialogues Inproceedings
In: Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, pp. 3, SEMDIAL, London, UK, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_wizard_2019,
title = {A Wizard of Oz Data Collection Framework for Internet of Things Dialogues},
author = {Carla Gordon and Volodymyr Yanov and David Traum and Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z19/Z19-4024/},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
pages = {3},
publisher = {SEMDIAL},
address = {London, UK},
abstract = {We describe a novel Wizard of Oz dialogue data collection framework in the Internet of Things domain. Our tool is designed for collecting dialogues between a human user, and 8 different system profiles, each with a different communication strategy. We then describe the data collection conducted with this tool, as well as the dialogue corpus that was generated.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan; Aydogan, Reyhan; Baarslag, Tim; Jonker, Catholijn M.
The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition Inproceedings
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_likeability-success_2019,
title = {The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition},
author = {Johnathan Mell and Jonathan Gratch and Reyhan Aydogan and Tim Baarslag and Catholijn M. Jonker},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {We present the results of the 2nd Annual Human-Agent League of the Automated Negotiating Agent Competition. Building on the success of the previous year’s results, a new challenge was issued that focused on exploring the likeability-success tradeoff in negotiations. By examining a series of repeated negotiations, actions may affect the relationship between automated negotiating agents and their human competitors over time. The results presented herein support a more complex view of human-agent negotiation and capture of integrative potential (win-win solutions). We show that, although likeability is generally seen as a tradeoff to winning, agents are able to remain well-liked while winning if integrative potential is not discovered in a given negotiation. The results indicate that the top-performing agent in this competition took advantage of this loophole by engaging in favor exchange across negotiations (cross-game logrolling). These exploratory results provide information about the effects of different submitted “black-box” agents in human-agent negotiation and provide a state-of-the-art benchmark for human-agent design.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Gratch, Jonathan; Parkinson, Brian; Shore, Danielle
Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context Inproceedings
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 7, IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hoegen_signals_2019,
title = {Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context},
author = {Rens Hoegen and Jonathan Gratch and Brian Parkinson and Danielle Shore},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {7},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {In social decision-making tasks, facial expressions are informative signals that indicate motives and intentions. As people are aware that their expressions influence partner behavior, expressions may be strategically regulated in competitive environments to influence a social partner’s decisionmaking. In this work, we examine facial expressions and their strategic regulation within the context of an iterated prisoner’s dilemma. Utilizing video-cued rating procedures, we examine several key questions about the functionality of facial expressions in social decision-making. First, we assess the extent to which emotion and expression regulation are accurately detected from dynamic facial expressions in interpersonal interactions. Second, we explore which facial cues are utilized to evaluate emotion and regulation information. Finally, we investigate the role of context in participants’ emotion and regulation judgments. Results show that participants accurately perceive facial emotion and expression regulation, although they are better at recognizing emotions than regulation. Using automated expression analysis and stepwise regression, we constructed models that use action units from participant videos to predict their video-cued emotion and regulation ratings. We show that these models perform similarly and, in some cases, better than participants do. Moreover, these models demonstrate that game state information improves predictive accuracy, thus implying that context information is important in the evaluation of facial expressions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Gratch, Jonathan
Smiles Signal Surprise in a Social Dilemma Inproceedings
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lei_smiles_2019,
title = {Smiles Signal Surprise in a Social Dilemma},
author = {Su Lei and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {This study examines spontaneous facial expressions in an iterated prisoner’s dilemma with financial stakes. Our goal was to identify typical facial expressions associated with key events during the interaction (e.g., cooperation or exploitation) and contrast these reactions with alternative theories of the meaning of facial expressions. Specifically, we examined if expressions reflect individual self-interest (e.g., winning) or social motives (e.g., promoting fairness) and the extent to which surprise might moderate the intensity of facial displays. In contrast to predictions of scientific and folk theories of expression, smiles were the only expressions consistently elicited, regardless of the reward or fairness of outcomes. Further, these smiles serve as a reliable indicator of the surprisingness of the event, but not its pleasure (contradicting research on both the meaning of smiles and indicators of surprise). To our knowledge, this is the first study to indicate that smiles signal surprise.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Rizzo, Albert; Gratch, Jonathan; Scherer, Stefan; Stratou, Giota; Boberg, Jill; Morency, Louis-Philippe
Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers Incollection
In: The Impact of Virtual and Augmented Reality on Individuals and Society, pp. 256–264, Frontiers Media SA, 2019.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@incollection{lucas_reporting_2019,
title = {Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers},
author = {Gale M. Lucas and Albert Rizzo and Jonathan Gratch and Stefan Scherer and Giota Stratou and Jill Boberg and Louis-Philippe Morency},
url = {https://books.google.com/books?hl=en&lr=&id=N724DwAAQBAJ&oi=fnd&pg=PP1&dq=The+Impact+of+Virtual+and+Augmented+Reality+on+Individuals+and+Society&ots=ZMD1P9T-K5&sig=Qqh7iHZ4Xq2iRyYecrECHwNNE38#v=onepage&q=The%20Impact%20of%20Virtual%20and%20Augmented%20Reality%20on%20Individuals%20and%20Society&f=false},
year = {2019},
date = {2019-09-01},
booktitle = {The Impact of Virtual and Augmented Reality on Individuals and Society},
pages = {256--264},
publisher = {Frontiers Media SA},
abstract = {A common barrier to healthcare for psychiatric conditions is the stigma associated with these disorders. Perceived stigma prevents many from reporting their symptoms. Stigma is a particularly pervasive problem among military service members, preventing them from reporting symptoms of combat-related conditions like posttraumatic stress disorder (PTSD). However, research shows increased reporting by service members when anonymous assessments are used. For example, service members report more symptoms of PTSD when they anonymously answer the Post-Deployment Health Assessment (PDHA) symptom checklist compared to the official PDHA, which is identifiable and linked to their military records. To investigate the factors that influence reporting of psychological symptoms by service members, we used a transformative technology: automated virtual humans that interview people about their symptoms. Such virtual human interviewers allow simultaneous use of two techniques for eliciting disclosure that would otherwise be incompatible; they afford anonymity while also building rapport. We examined whether virtual human interviewers could increase disclosure of mental health symptoms among active-duty service members that just returned from a year-long deployment in Afghanistan. Service members reported more symptoms during a conversation with a virtual human interviewer than on the official PDHA. They also reported more to a virtual human interviewer than on an anonymized PDHA. A second, larger sample of active-duty and former service members found a similar effect that approached statistical significance. Because respondents in both studies shared more with virtual human interviewers than an anonymized PDHA—even though both conditions control for stigma and ramifications for service members’ military records—virtual human interviewers that build rapport may provide a superior option to encourage reporting.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Inproceedings
In: Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems, pp. 161–167, Springer, Cham, Switzerland, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lycan_direct_2019,
title = {Direct and Mediated Interaction with a Holocaust Survivor},
author = {Bethany Lycan and Ron Artstein},
url = {https://doi.org/10.1007/978-3-319-92108-2_17},
doi = {10.1007/978-3-319-92108-2_17},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems},
volume = {510},
pages = {161--167},
publisher = {Springer},
address = {Cham, Switzerland},
series = {Lecture Notes in Electrical Engineering},
abstract = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Inproceedings
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association of Computational Linguistics, Florence, Italy, 2019.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199--210},
publisher = {Association of Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Forbus, Kenneth D.
Expanding and Repositioning Cognitive Science Journal Article
In: Topics in Cognitive Science, 2019, ISSN: 1756-8757, 1756-8765.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{rosenbloom_expanding_2019,
title = {Expanding and Repositioning Cognitive Science},
author = {Paul S. Rosenbloom and Kenneth D. Forbus},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/tops.12468},
doi = {10.1111/tops.12468},
issn = {1756-8757, 1756-8765},
year = {2019},
date = {2019-08-01},
journal = {Topics in Cognitive Science},
abstract = {Cognitive science has converged in many ways with cognitive psychology, while also maintaining a distinctive interdisciplinary nature. Here we further characterize this existing state of the field before proposing how it might be reconceptualized toward a broader and more distinct, and thus more stable, position in the realm of sciences.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Signing Virtual Human Engage a Baby's Attention? Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 162–169, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nasihati_gilani_can_2019,
title = {Can a Signing Virtual Human Engage a Baby's Attention?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329463},
doi = {10.1145/3308532.3329463},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {162--169},
publisher = {ACM Press},
address = {Paris, France},
abstract = {The child developmental period of ages 6-12 months marks a widely understood “critical period” for healthy language learning, during which, failure to receive exposure to language can place babies at risk for language and reading problems spanning life. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period. Technology has been used to augment linguistic input (e.g., auditory devices; language videotapes) but research finds limitations in learning. We evaluated an AI system that uses an Avatar (provides language and socially contingent interactions) and a robot (aids attention to the Avatar) to facilitate infants’ ability to learn aspects of American Sign Language (ASL), and asked three questions: (1) Can babies with little/no exposure to ASL distinguish among the Avatar’s different conversational modes (Linguistic Nursery Rhymes; Social Gestures; Idle/nonlinguistic postures; 3rd person observer)? (2) Can an Avatar stimulate babies’ production of socially contingent responses, and crucially, nascent language responses? (3) What is the impact of parents’ presence/absence of conversational participation? Surprisingly, babies (i) spontaneously distinguished among Avatar conversational modes, (ii) produced varied socially contingent responses to Avatar’s modes, and (iii) parents influenced an increase in babies’ response tokens to some Avatar modes, but the overall categories and pattern of babies’ behavioral responses remained proportionately similar irrespective of parental participation. Of note, babies produced the greatest percentage of linguistic responses to the Avatar’s Linguistic Nursery Rhymes versus other Avatar conversational modes. This work demonstrates the potential for Avatars to facilitate language learning in young babies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stocco, Andrea; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence Technical Report
Neuroscience 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@techreport{stocco_analysis_2019,
title = {Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence},
author = {Andrea Stocco and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
url = {http://biorxiv.org/lookup/doi/10.1101/703777},
doi = {10.1101/703777},
year = {2019},
date = {2019-07-01},
pages = {38},
institution = {Neuroscience},
abstract = {The Common Model of Cognition (CMC) is a recently proposed, consensus architecture intended to capture decades of progress in cognitive science on modeling human and human-like intelligence. Because of the broad agreement around it and preliminary mappings of its components to specific brain areas, we hypothesized that the CMC could be a candidate model of the large-scale functional architecture of the human brain. To test this hypothesis, we analyzed functional MRI data from 200 participants and seven different tasks that cover the broad range of cognitive domains. The CMC components were identified with functionally homologous brain regions through canonical fMRI analysis, and their communication pathways were translated into predicted patterns of effective connectivity between regions. The resulting dynamic linear model was implemented and fitted using Dynamic Causal Modeling, and compared against four alternative brain architectures that had been previously proposed in the field of neuroscience (two hierarchical architectures and two hub-and-spoke architectures) using a Bayesian approach. The results show that, in all cases, the CMC vastly outperforms all other architectures, both within each domain and across all tasks. The results suggest that a common, general architecture that could be used for artificial intelligence effectively underpins all aspects of human cognition, from the overall functional architecture of the human brain to higher level thought processes.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{hartholt_virtual_2019,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238--240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ASD), veterans transitioning to civilian life, and former convicts integrating back into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different type of vocational field (e.g. service industry, retail, office, etc.). Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying difficulties, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g. conference room, restaurant, executive office, etc.), allowing for many different combinations in which to practice.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 212–214, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_expert-model_2019,
title = {An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329433},
doi = {10.1145/3308532.3329433},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {212--214},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other, more limited techniques (such as linear regression models or boosted decision trees). We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Minha; Lucas, Gale; Mell, Johnathan; Johnson, Emmanuel; Gratch, Jonathan
What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 38–45, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lee_whats_2019,
title = {What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations},
author = {Minha Lee and Gale Lucas and Johnathan Mell and Emmanuel Johnson and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329465},
doi = {10.1145/3308532.3329465},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {38--45},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In this article we examine how perceptions of a virtual agent’s mind shape behavior in human-agent negotiations. We varied descriptions and communicative behavior of virtual agents on two dimensions according to the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude). Participants then engaged in negotiations with the different agents. People scored more points and engaged in shorter negotiations with agents described to be cognitively intelligent, and got lower points and had longer negotiations with agents that were described to be cognitively unintelligent. Accordingly, agents described as having low-agency ended up earning more points than those with high-agency. Within the negotiations themselves, participants sent more happy and surprise emojis and emotionally valenced messages to agents described to be emotional. This high degree of described patiency also affected perceptions of the agent’s moral standing and relatability. In short, manipulating the perceived mind of agents affects how people negotiate with them. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S; Ustun, Volkan
An Architectural Integration of Temporal Motivation Theory for Decision Making Inproceedings
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_architectural_2019,
title = {An Architectural Integration of Temporal Motivation Theory for Decision Making},
author = {Paul S Rosenbloom and Volkan Ustun},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_7.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {Temporal Motivation Theory (TMT) is incorporated into the Sigma cognitive architecture to explore the ability of this combination to yield human-like decision making. In conjunction with Lazy Reinforcement Learning (LRL), which provides the inputs required for this form of decision making, experiments are run on a simple reinforcement learning task, a preference reversal task, and an uncertain two-choice task.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S
(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition Inproceedings
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_symmetry_2019,
title = {(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition},
author = {Paul S Rosenbloom},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_6.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {A range of dichotomies from across the cognitive sciences are reduced to either (a)symmetry or (non)monotonicity. Taking the cross-product of these two elemental dichotomies then yields a deeper understanding of both two key trichotomies –based on control and content hierarchies – and the Common Model of Cognition, with results that bear on the structure of integrative cognitive architectures, models and systems, and on their commonalities, differences and gaps.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Roediger, Sarah; Lucas, Gale; Gratch, Jonathan
Assessing Common Errors Students Make When Negotiating Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 30–37, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{johnson_assessing_2019,
title = {Assessing Common Errors Students Make When Negotiating},
author = {Emmanuel Johnson and Sarah Roediger and Gale Lucas and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329470},
doi = {10.1145/3308532.3329470},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {30--37},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Research has shown that virtual agents can be effective tools for teaching negotiation. Virtual agents provide an opportunity for students to practice their negotiation skills which leads to better outcomes. However, these negotiation training agents often lack the ability to understand the errors students make when negotiating, thus limiting their effectiveness as training tools. In this article, we argue that automated opponent-modeling techniques serve as effective methods for diagnosing important negotiation mistakes. To demonstrate this, we analyze a large number of participant traces generated while negotiating with a set of automated opponents. We show that negotiators’ performance is closely tied to their understanding of an opponent’s preferences. We further show that opponent modeling techniques can diagnose specific errors including: failure to elicit diagnostic information from an opponent, failure to utilize the information that was elicited, and failure to understand the transparency of an opponent. These results show that opponent modeling techniques can be effective methods for diagnosing and potentially correcting crucial negotiation errors.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Lehr, Janina; Krämer, Nicole; Gratch, Jonathan
The Effectiveness of Social Influence Tactics when Used by a Virtual Agent Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 22–29, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_effectiveness_2019,
title = {The Effectiveness of Social Influence Tactics when Used by a Virtual Agent},
author = {Gale M. Lucas and Janina Lehr and Nicole Krämer and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329464},
doi = {10.1145/3308532.3329464},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {22--29},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Research in social science distinguishes between two types of social influence: informational and normative. Informational social influence is driven by the desire to evaluate ambiguous situations correctly, whereas normative social influence is driven by the desire to be liked and gain social acceptance from another person. Although we know from research that humans can effectively use either of these techniques to persuade other humans, scholars have yet to examine the relative effectiveness of informational versus normative social influence when used by virtual agents. We report a study in which users interact with a system that persuades them either using informational or normative social influence. Furthermore, to compare agents to human interlocutors, users are told that the system is either teleoperated by a human (avatar) or fully-automated (agent). Using this design, we are able to compare the effectiveness of virtual agents (vs humans) in employing informational versus normative social influence. Participants interacted with the system, which employed a Wizard-of-Oz operated virtual agent that tried to persuade the user to agree with its rankings on a “survival task.” Controlling for initial divergence in rankings between user and the agent, there was a significant main effect such that informational social influence resulted in greater influence than normative influence. However, this was qualified by an interaction that approached significance; users were, if anything, more persuaded by informational influence when they believe the agent was AI (compared to a human), whereas there was no difference between the agent and avatar in the normative influence condition.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Sohail, Usman; Traum, David
A Blissymbolics Translation System Inproceedings
In: Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies, pp. 32–36, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{sohail_blissymbolics_2019,
title = {A Blissymbolics Translation System},
author = {Usman Sohail and David Traum},
url = {http://aclweb.org/anthology/W19-1705},
doi = {10.18653/v1/W19-1705},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies},
pages = {32--36},
publisher = {Association for Computational Linguistics},
address = {Minneapolis, Minnesota},
abstract = {Blissymbolics (Bliss) is a pictographic writing system that is used by people with communication disorders. Bliss attempts to create a writing system that makes words easier to distinguish by using pictographic symbols that encapsulate meaning rather than sound, as the English alphabet does for example. Users of Bliss rely on human interpreters to use Bliss. We created a translation system from Bliss to natural English with the hopes of decreasing the reliance on human interpreters by the Bliss community. We first discuss the basic rules of Blissymbolics. Then we point out some of the challenges associated with developing computer assisted tools for Blissymbolics. Next we talk about our ongoing work in developing a translation system, including current limitations, and future work. We conclude with a set of examples showing the current capabilities of our translation system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Ultes, Stefan; Rojas-Barahona, Lina; Pincus, Eli; Traum, David; Eskenazi, Maxine
An Assessment Framework for DialPort Incollection
In: Advanced Social Interaction with Agents, vol. 510, pp. 79–85, Springer International Publishing, Cham, 2019, ISBN: 978-3-319-92107-5 978-3-319-92108-2.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{lee_assessment_2019,
title = {An Assessment Framework for DialPort},
author = {Kyusong Lee and Tiancheng Zhao and Stefan Ultes and Lina Rojas-Barahona and Eli Pincus and David Traum and Maxine Eskenazi},
url = {http://link.springer.com/10.1007/978-3-319-92108-2_10},
doi = {10.1007/978-3-319-92108-2_10},
isbn = {978-3-319-92107-5 978-3-319-92108-2},
year = {2019},
date = {2019-06-01},
urldate = {2019-10-28},
booktitle = {Advanced Social Interaction with Agents},
volume = {510},
pages = {79--85},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Collecting a large amount of real human-computer interaction data in various domains is a cornerstone in the development of better data-driven spoken dialog systems. The DialPort project is creating a portal to collect a constant stream of real user conversational data on a variety of topics. In order to keep real users attracted to DialPort, it is crucial to develop a robust evaluation framework to monitor and maintain high performance. Different from earlier spoken dialog systems, DialPort has a heterogeneous set of spoken dialog systems gathered under one outward-looking agent. In order to access this new structure, we have identified some unique challenges that DialPort will encounter so that it can appeal to real users and have created a novel evaluation scheme that quantitatively assesses their performance in these situations. We look at assessment from the point of view of the system developer as well as that of the end user.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Pilly, Praveen K; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Jones, Aaron P.; Bradley, Robert; Bryant, Natalie B.; Lerner, Itamar; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael P.
Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans Journal Article
In: bioRxiv, pp. 110, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{pilly_spatiotemporal_2019,
title = {Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans},
author = {Praveen K Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Aaron P. Jones and Robert Bradley and Natalie B. Bryant and Itamar Lerner and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael P. Howard},
url = {https://www.biorxiv.org/content/10.1101/672378v1.abstract},
doi = {10.1101/672378},
year = {2019},
date = {2019-06-01},
journal = {bioRxiv},
pages = {110},
abstract = {Long-term retention of memories critically depends on consolidation processes, which occur during slow-wave oscillations (SWOs) in non-rapid eye movement (NREM) sleep. We designed a non-invasive system that can tag one-shot experiences of naturalistic episodes within immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). We demonstrate that these STAMPs can be re-applied during UP states of SWOs on two consecutive nights to achieve a 19.43% improvement in the metamemory of targeted episodes at 48 hours after the one-shot viewing, compared to the control episodes. Further, we found an interaction between pre-sleep metamemory of targeted episodes and the number of STAMP applications for those episodes during sleep, and that STAMPs elicit increases in left temporal slow-spindle (9-12 Hz) power that are predictive of overnight metamemory improvements. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory based on pre-sleep performance and tracking the STAMP-induced biomarker during sleep, and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Song, Yale; Soleymani, Mohammad
Polysemous Visual-Semantic Embedding for Cross-Modal Retrieval Inproceedings
In: Proceedings of the 2019 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10, IEEE, Long Beach, CA, 2019.
@inproceedings{song_polysemous_2019,
title = {Polysemous Visual-Semantic Embedding for Cross-Modal Retrieval},
author = {Yale Song and Mohammad Soleymani},
url = {https://arxiv.org/abs/1906.04402},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {10},
publisher = {IEEE},
address = {Long Beach, CA},
abstract = {Visual-semantic embedding aims to find a shared latent space where related visual and textual instances are close to each other. Most current methods learn injective embedding functions that map an instance to a single point in the shared space. Unfortunately, injective embedding cannot effectively handle polysemous instances with multiple possible meanings; at best, it would find an average representation of different meanings. This hinders its use in real-world scenarios where individual instances and their cross-modal associations are often ambiguous. In this work, we introduce Polysemous Instance Embedding Networks (PIE-Nets) that compute multiple and diverse representations of an instance by combining global context with locally-guided features via multi-head self-attention and residual learning. To learn visual-semantic embedding, we tie-up two PIE-Nets and optimize them jointly in the multiple instance learning framework. Most existing work on cross-modal retrieval focus on image-text pairs of data. Here, we also tackle a more challenging case of video-text retrieval. To facilitate further research in video-text retrieval, we release a new dataset of 50K video-sentence pairs collected from social media, dubbed MRW (my reaction when). We demonstrate our approach on both image-text and video-text retrieval scenarios using MS-COCO, TGIF, and our new MRW dataset.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Lucas, Gale; Kim, Peter; Gratch, Jonathan
Intelligent Tutoring System for Negotiation Skills Training Incollection
In: Artificial Intelligence in Education, vol. 11626, pp. 122–127, Springer International Publishing, Cham, Switzerland, 2019, ISBN: 978-3-030-23206-1 978-3-030-23207-8.
@incollection{johnson_intelligent_2019,
title = {Intelligent Tutoring System for Negotiation Skills Training},
author = {Emmanuel Johnson and Gale Lucas and Peter Kim and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-030-23207-8_23},
doi = {10.1007/978-3-030-23207-8_23},
isbn = {978-3-030-23206-1 978-3-030-23207-8},
year = {2019},
date = {2019-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {11626},
pages = {122--127},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Intelligent tutoring systems have proven very effective at teaching hard skills such as math and science, but less research has examined how to teach “soft” skills such as negotiation. In this paper, we introduce an effective approach to teaching negotiation tactics. Prior work showed that students can improve through practice with intelligent negotiation agents. We extend this work by proposing general methods of assessment and feedback that could be applied to a variety of such agents. We evaluate these techniques through a human subject study. Our study demonstrates that personalized feedback improves students’ use of several foundational tactics.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Editor, Managing; Adler, Aaron; Dasgupta, Prithviraj; DePalma, Nick; Eslami, Mohammed; Freedman, Richard; Laird, John; Lebiere, Christian; Lohan, Katrin; Mead, Ross; Roberts, Mark; Rosenbloom, Paul; Senft, Emmanuel; Stein, Frank; Williams, Tom; Wray, Kyle Hollins; Yaman, Fusun; Zilberstein, Shlomo
Reports of the 2018 AAAI Fall Symposium Journal Article
In: AI Magazine, vol. 40, no. 2, pp. 66–72, 2019, ISSN: 2371-9621, 0738-4602.
@article{editor_reports_2019,
title = {Reports of the 2018 AAAI Fall Symposium},
author = {Managing Editor and Aaron Adler and Prithviraj Dasgupta and Nick DePalma and Mohammed Eslami and Richard Freedman and John Laird and Christian Lebiere and Katrin Lohan and Ross Mead and Mark Roberts and Paul Rosenbloom and Emmanuel Senft and Frank Stein and Tom Williams and Kyle Hollins Wray and Fusun Yaman and Shlomo Zilberstein},
url = {http://www.aaai.org/ojs/index.php/aimagazine/article/view/2887},
doi = {10.1609/aimag.v40i2.2887},
issn = {2371-9621, 0738-4602},
year = {2019},
date = {2019-06-01},
journal = {AI Magazine},
volume = {40},
number = {2},
pages = {66--72},
abstract = {The AAAI 2018 Fall Symposium Series was held Thursday through Saturday, October 18–20, at the Westin Arlington Gateway in Arlington, Virginia, adjacent to Washington, D.C. The titles of the eight symposia were Adversary-Aware Learning Techniques and Trends in Cybersecurity; Artificial Intelligence for Synthetic Biology; Artificial Intelligence in Government and Public Sector; A Common Model of Cognition; Gathering for Artificial Intelligence and Natural System; Integrating Planning, Diagnosis, and Causal Reasoning; Interactive Learning in Artificial Intelligence for Human-Robot Interaction; and Reasoning and Learning in Real-World Systems for Long-Term Autonomy. The highlights of each symposium (except the Gathering for Artificial Intelligence and Natural System symposium, whose organizers failed to submit a summary) are presented in this report.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zhu, Runhe; Becerik-Gerber, Burcin; Lucas, Gale; Southers, Erroll; Pynadath, David V
Information Requirements for Virtual Environments to Study Human-Building Interactions during Active Shooter Incidents Journal Article
In: Computing in Civil Engineering, pp. 8, 2019.
@article{zhu_information_2019,
title = {Information Requirements for Virtual Environments to Study Human-Building Interactions during Active Shooter Incidents},
author = {Runhe Zhu and Burcin Becerik-Gerber and Gale Lucas and Erroll Southers and David V Pynadath},
url = {https://ascelibrary.org/doi/10.1061/9780784482445.024},
doi = {10.1061/9780784482445.024},
year = {2019},
date = {2019-06-01},
journal = {Computing in Civil Engineering},
pages = {8},
abstract = {Active shooter incidents present an increasing American homeland security threat to public safety and human life. Several municipal law enforcement agencies have released building design guidelines intended to offer increased resilience and resistance to potential attacks. However, these design recommendations mainly focus on terrorist attacks, prioritizing the enhancement of building security, whereas their impact on safety during active shooter incidents, and corresponding human-building interactions (HBIs) that influence the outcomes (response performance), remain unclear. To respond to this research gap, virtual reality, with its ability to manipulate environmental variables and scenarios while providing safe non-invasive environments, could be a promising method to conduct human-subject studies in the context of active shooter incidents. In this paper, we identify the requirements for developing virtual environments that represent active shooter incidents in buildings to study HBIs and their impacts on the response performance. Key components constituting virtual environments were considered and presented. These include: (1) what types of buildings should be modeled in virtual environments; (2) how to select protective building design recommendations for active shooter incidents and model them in virtual environments; (3) what types of adversary and crowd behavior should be modeled; and (4) what types of interactions among participants, buildings, adversaries, and crowds should be included in virtual environments. Findings on the above key components were summarized to provide recommendations for future research directions.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Aryal, Ashrant; Becerik-Gerber, Burcin; Anselmo, Francesco; Roll, Shawn C.; Lucas, Gale M.
Smart Desks to Promote Comfort, Health, and Productivity in Offices: A Vision for Future Workplaces Journal Article
In: Frontiers in Built Environment, vol. 5, 2019, ISSN: 2297-3362.
@article{aryal_smart_2019,
title = {Smart Desks to Promote Comfort, Health, and Productivity in Offices: A Vision for Future Workplaces},
author = {Ashrant Aryal and Burcin Becerik-Gerber and Francesco Anselmo and Shawn C. Roll and Gale M. Lucas},
url = {https://www.frontiersin.org/article/10.3389/fbuil.2019.00076/full},
doi = {10.3389/fbuil.2019.00076},
issn = {2297-3362},
year = {2019},
date = {2019-06-01},
journal = {Frontiers in Built Environment},
volume = {5},
abstract = {People spend most of their day in buildings, and a large portion of the energy in buildings is used to control the indoor environment for creating acceptable conditions for occupants. However, the majority of the building systems are controlled based on a “one size fits all” scheme which cannot account for individual occupant preferences. This leads to discomfort, low satisfaction and negative impacts on occupants’ productivity, health and well-being. In this paper, we describe our vision of how recent advances in Internet of Things (IoT) and machine learning can be used to add intelligence to an office desk to personalize the environment around the user. The smart desk can learn individual user preferences for the indoor environment, personalize the environment based on user preferences and act as an intelligent support system for improving user comfort, health and productivity. We briefly describe the recent advances made in different domains that can be leveraged to enhance occupant experience in buildings and describe the overall framework for the smart desk. We conclude the paper with a discussion of possible avenues for further research.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zhao, Yajie; Huang, Zeng; Li, Tianye; Chen, Weikai; LeGendre, Chloe; Ren, Xinglei; Xing, Jun; Shapiro, Ari; Li, Hao
Learning Perspective Undistortion of Portraits Journal Article
In: arXiv:1905.07515 [cs], 2019.
@article{zhao_learning_2019,
title = {Learning Perspective Undistortion of Portraits},
author = {Yajie Zhao and Zeng Huang and Tianye Li and Weikai Chen and Chloe LeGendre and Xinglei Ren and Jun Xing and Ari Shapiro and Hao Li},
url = {http://arxiv.org/abs/1905.07515},
year = {2019},
date = {2019-05-01},
journal = {arXiv:1905.07515 [cs]},
abstract = {Near-range portrait photographs often contain perspective distortion artifacts that bias human perception and challenge both facial recognition and reconstruction techniques. We present the first deep learning based approach to remove such artifacts from unconstrained portraits. In contrast to the previous state-of-the-art approach, our method handles even portraits with extreme perspective distortion, as we avoid the inaccurate and error-prone step of first fitting a 3D face model. Instead, we predict a distortion correction flow map that encodes a per-pixel displacement that removes distortion artifacts when applied to the input image. Our method also automatically infers missing facial features, i.e. occluded ears caused by strong perspective distortion, with coherent details. We demonstrate that our approach significantly outperforms the previous state-of-the-art both qualitatively and quantitatively, particularly for portraits with extreme perspective distortion or facial expressions. We further show that our technique benefits a number of fundamental tasks, significantly improving the accuracy of both face recognition and 3D reconstruction and enables a novel camera calibration technique from a single portrait. Moreover, we also build the first perspective portrait database with a large diversity in identities, expression and poses, which will benefit the related research in this area.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Virtual Human Facilitate Language Learning in a Young Baby? Inproceedings
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
@inproceedings{gilani_can_2019,
title = {Can a Virtual Human Facilitate Language Learning in a Young Baby?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
url = {https://dl.acm.org/citation.cfm?id=3332035},
isbn = {978-1-4503-6309-9},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
publisher = {ACM},
address = {Montreal, Canada},
abstract = {There is a significant paucity of work on language learning systems for young infants [2, 5, 19] despite the widely understood critical importance that this developmental period has for healthy language and cognitive growth, and related reading and academic success [6, 14]. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period [18]. This causes potentially devastating impact on children's linguistic, cognitive, and social skills [9, 10, 15, 16, 20]. We introduced an AI system, called RAVE (Robot, AVatar, thermal Enhanced language learning tool), designed specifically for babies within the age range of 6-12 months [8, 17]. RAVE consists of two agents: a virtual human (provides language and socially contingent interactions) and an embodied robot (provides socially engaging physical cues to babies and directs babies' attention to the virtual human). Detailed description of the system's constituent components and dialogue algorithms are presented in [17] and [8].},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rychlowska, Magdalena; Schalk, Job; Gratch, Jonathan; Breitinger, Eva; Manstead, Antony S. R.
Beyond actions: Reparatory effects of regret in intergroup trust games Journal Article
In: Journal of Experimental Social Psychology, vol. 82, pp. 74–84, 2019, ISSN: 0022-1031.
@article{rychlowska_beyond_2019,
title = {Beyond actions: Reparatory effects of regret in intergroup trust games},
author = {Magdalena Rychlowska and Job Schalk and Jonathan Gratch and Eva Breitinger and Antony S. R. Manstead},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0022103118303974},
doi = {10.1016/j.jesp.2019.01.006},
issn = {0022-1031},
year = {2019},
date = {2019-05-01},
journal = {Journal of Experimental Social Psychology},
volume = {82},
pages = {74--84},
abstract = {Intergroup trust is vital for cooperation and societal well-being, but is harder to establish than interpersonal trust. We investigate whether expressions of negative emotions, in particular regret, following economic decisions can shape intergroup trust. In each of three studies participants were members of a group playing a two-round trust game with another group. In the first round, they observed an outgroup member who acted fairly or unfairly towards the ingroup and then expressed positive (i.e., happiness) or negative (i.e., regret, unhappiness) emotions about this behavior. In the second round, participants played with another outgroup member. Emotions displayed by the outgroup representative following unfair behavior in round 1 influenced participants' allocations in round 2, which were higher following regret and unhappiness than following positive emotions. Thus, emotions expressed by one outgroup member affected interactions with other members who had not communicated emotions. Findings of Study 3 revealed that these effects were driven by regret increasing intergroup trust, rather than by happiness decreasing it. Moreover, participants' allocations were predicted by their perceptions of the extent to which the outgroup representative wished to change her behavior. Together, the findings reveal that regret expressions influence intergroup trust by attenuating the detrimental effects of unfair behavior.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Koeman, Vincent J; Hindriks, Koen V; Gratch, Jonathan; Jonker, Catholijn M
Recognising and Explaining Bidding Strategies in Negotiation Support Systems Inproceedings
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 3, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
@inproceedings{koeman_recognising_2019,
title = {Recognising and Explaining Bidding Strategies in Negotiation Support Systems},
author = {Vincent J Koeman and Koen V Hindriks and Jonathan Gratch and Catholijn M Jonker},
url = {https://dl.acm.org/citation.cfm?id=3332011},
isbn = {978-1-4503-6309-9},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {3},
publisher = {ACM},
address = {Montreal, Canada},
abstract = {To improve a negotiator's ability to recognise bidding strategies, we pro-actively provide explanations that are based on the opponent's bids and the negotiator's guesses about the opponent's strategy. We introduce an aberration detection mechanism for recognising strategies and the notion of an explanation matrix. The aberration detection mechanism identifies when a bid falls outside the range of expected behaviour for a specific strategy. The explanation matrix is used to decide when to provide what explanations. We evaluated our work experimentally in a task in which participants are asked to identify their opponent's strategy in the environment of a negotiation support system, namely the Pocket Negotiator (PN). We implemented our explanation mechanism in the PN and experimented with different explanation matrices. As the number of correct guesses increases with explanations, indirectly, these experiments show the effectiveness of our aberration detection mechanism. Our experiments with over 100 participants show that suggesting consistent strategies is more effective than explaining why observed behaviour is inconsistent.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Core, Mark G; Nye, Benjamin D; Karumbaiah, Shamya; Auerbach, Daniel; Ram, Maya
Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training Inproceedings
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 9, IFAAMAS, Montreal, Canada, 2019.
@inproceedings{georgila_using_2019,
title = {Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training},
author = {Kallirroi Georgila and Mark G Core and Benjamin D Nye and Shamya Karumbaiah and Daniel Auerbach and Maya Ram},
url = {http://www.ifaamas.org/Proceedings/aamas2019/pdfs/p737.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {9},
publisher = {IFAAMAS},
address = {Montreal, Canada},
abstract = {Reinforcement Learning (RL) has been applied successfully to Intelligent Tutoring Systems (ITSs) in a limited set of well-defined domains such as mathematics and physics. This work is unique in using a large state space and for applying RL to tutoring interpersonal skills. Interpersonal skills are increasingly recognized as critical to both social and economic development. In particular, this work enhances an ITS designed to teach basic counseling skills that can be applied to challenging issues such as sexual harassment and workplace conflict. An initial data collection was used to train RL policies for the ITS, and an evaluation with human participants compared a hand-crafted ITS which had been used for years with students (control) versus the new ITS guided by RL policies. The RL condition differed from the control condition most notably in the strikingly large quantity of guidance it provided to learners. Both systems were effective and there was an overall significant increase from pre- to post-test scores. Although learning gains did not differ significantly between conditions, learners had a significantly higher self-rating of confidence in the RL condition. Confidence and learning gains were both part of the reward function used to train the RL policies, and it could be the case that there was the most room for improvement in confidence, an important learner emotion. Thus, RL was successful in improving an ITS for teaching interpersonal skills without the need to prune the state space (as previously done).},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}