Publications
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Visualization of social emotional appraisal process of an agent Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pp. 1–2, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-021-3.
@inproceedings{sato_visualization_2021,
title = {Visualization of social emotional appraisal process of an agent},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9666329/},
doi = {10.1109/ACIIW52867.2021.9666329},
isbn = {978-1-66540-021-3},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)},
pages = {1–2},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{johnson_comparing_2021,
title = {Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478354},
doi = {10.1145/3472306.3478354},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {139–144},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{johnson_using_2021,
title = {Using Intelligent Agents to Examine Gender in Negotiations},
author = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
url = {https://dl.acm.org/doi/10.1145/3472306.3478348},
doi = {10.1145/3472306.3478348},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {90–97},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{terada_effect_2021,
title = {Effect of politeness strategies in dialogue on negotiation outcomes},
author = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478336},
doi = {10.1145/3472306.3478336},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {195–202},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale
Rapport Between Humans and Socially Interactive Agents Book Section
In: Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.): The Handbook on Socially Interactive Agents, pp. 433–462, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
@incollection{gratch_rapport_2021,
title = {Rapport Between Humans and Socially Interactive Agents},
author = {Jonathan Gratch and Gale Lucas},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/10.1145/3477322.3477335},
doi = {10.1145/3477322.3477335},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {The Handbook on Socially Interactive Agents},
pages = {433–462},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
@inproceedings{chawla_towards_2021,
title = {Towards Emotion-Aware Agents For Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9597427/},
doi = {10.1109/ACII52823.2021.9597427},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-27},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1–8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.)
The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition Book
1, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
@book{lugrin_handbook_2021,
title = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/book/10.1145/3477322},
doi = {10.1145/3477322},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
@inproceedings{yin_contrastive_2021,
title = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
author = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9597453/},
doi = {10.1109/ACII52823.2021.9597453},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1–8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{hartholt_introducing_2021-1,
title = {Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {109–111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert “Skip”; Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: Journal of Technology in Human Services, vol. 39, no. 3, pp. 314–347, 2021, ISSN: 1522-8835, (Publisher: Routledge).
@article{rizzo_combat_2021,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Albert “Skip” Rizzo and Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1080/15228835.2021.1915931},
doi = {10.1080/15228835.2021.1915931},
issn = {1522-8835},
year = {2021},
date = {2021-07-01},
urldate = {2023-03-31},
journal = {Journal of Technology in Human Services},
volume = {39},
number = {3},
pages = {314–347},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
note = {Publisher: Routledge},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Horstmann, Aike C.; Gratch, Jonathan; Krämer, Nicole C.
I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person Journal Article
In: International Journal of Human-Computer Studies, pp. 102683, 2021, ISSN: 1071-5819.
@article{horstmann_i_2021,
title = {I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person},
author = {Aike C. Horstmann and Jonathan Gratch and Nicole C. Krämer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1071581921001014},
doi = {10.1016/j.ijhcs.2021.102683},
issn = {1071-5819},
year = {2021},
date = {2021-06-01},
urldate = {2021-06-18},
journal = {International Journal of Human-Computer Studies},
pages = {102683},
abstract = {Previous research focused on differences between interacting with a person-controlled avatar and a computer-controlled virtual agent. This study however examines an aspiring form of technology called agent representative which constitutes a mix of the former two interaction partner types since it is a computer agent which was previously instructed by a person to take over a task on the person’s behalf. In an experimental lab study with a 2 x 3 between-subjects-design (N = 195), people believed to study together either with an agent representative, avatar, or virtual agent. The interaction partner was described to either possess high or low expertise, while always giving negative feedback regarding the participant’s performance. Results show small but interesting differences regarding the type of agency. People attributed the most agency and blame to the person(s) behind the software and reported the most negative affect when interacting with an avatar, which was less the case for a person’s agent representative and the least for a virtual agent. Level of expertise had no significant effect and other evaluation measures were not affected.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 2021.
@article{gervits_classication-based_2021,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
year = {2021},
date = {2021-03-01},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 2589-0042.
@article{de_melo_heuristic_2021,
title = {Heuristic thinking and altruism toward machines in people impacted by COVID-19},
author = {Celso M. de Melo and Jonathan Gratch and Frank Krueger},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
doi = {10.1016/j.isci.2021.102228},
issn = {2589-0042},
year = {2021},
date = {2021-03-01},
urldate = {2021-04-14},
journal = {iScience},
volume = {24},
number = {3},
pages = {102228},
abstract = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
@article{mell_expert-model_2021,
title = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s12193-021-00368-w},
doi = {10.1007/s12193-021-00368-w},
issn = {1783-7677, 1783-8738},
year = {2021},
date = {2021-03-01},
urldate = {2021-04-15},
journal = {J Multimodal User Interfaces},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
@incollection{dharo_towards_2021,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
editor = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
doi = {10.1007/978-981-15-8395-7_11},
isbn = {9789811583940 9789811583957},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
note = {Series Title: Lecture Notes in Electrical Engineering},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English Bilingual Chatbot Book Section
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 41–50, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
@incollection{dharo_masheli_2021,
title = {Masheli: A Choctaw-English Bilingual Chatbot},
author = {Jacqueline Brixey and David Traum},
editor = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_4},
doi = {10.1007/978-981-15-8395-7_4},
isbn = {9789811583940 9789811583957},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {41--50},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
note = {Series Title: Lecture Notes in Electrical Engineering},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lee, Minha; Lucas, Gale; Gratch, Jonathan
Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
@article{lee_comparing_2021,
title = {Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games},
author = {Minha Lee and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s12193-020-00356-6},
doi = {10.1007/s12193-020-00356-6},
issn = {1783-7677, 1783-8738},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
journal = {J Multimodal User Interfaces},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In a comparative manner, we observed how perceived minds of agents shape people’s behavior in the dictator game, ultimatum game, and negotiation against artificial agents. To do so, we varied agents’ minds on two dimensions of the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude) via descriptions and dialogs. In our first study, agents with emotional capacity garnered more allocations in the dictator game, but in the ultimatum game, agents’ described agency and affective capacity, both led to greater offers. In the second study on negotiation, agents ascribed with low-agency traits earned more points than those with high-agency traits, though the negotiation tactic was the same for all agents. Although patiency did not impact game points, participants sent more happy and surprise emojis and emotionally valenced messages to agents that demonstrated emotional capacity during negotiations. Further, our exploratory analyses indicate that people related only to agents with perceived affective aptitude across all games. Both perceived agency and affective capacity contributed to moral standing after dictator and ultimatum games. But after negotiations, only agents with perceived affective capacity were granted moral standing. Manipulating mind dimensions of machines has differing effects on how people react to them in dictator and ultimatum games, compared to a more complex economic exchange like negotiation. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations, in contrast with simple economic games.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The Promise and Peril of Automated Negotiators Journal Article
In: Negotiation Journal, vol. 37, no. 1, pp. 13–34, 2021, ISSN: 0748-4526, 1571-9979.
@article{gratch_promise_2021,
title = {The Promise and Peril of Automated Negotiators},
author = {Jonathan Gratch},
url = {https://onlinelibrary.wiley.com/doi/10.1111/nejo.12348},
doi = {10.1111/nejo.12348},
issn = {0748-4526, 1571-9979},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-14},
journal = {Negotiation Journal},
volume = {37},
number = {1},
pages = {13–34},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
The field of Affective Computing: An interdisciplinary perspective Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 36, no. 1, pp. 13, 2021.
@article{gratch_field_2021,
title = {The field of Affective Computing: An interdisciplinary perspective},
author = {Jonathan Gratch},
url = {https://people.ict.usc.edu/~gratch/CSCI534/Readings/Gratch%20-%20The%20field%20of%20affective%20computing.pdf},
year = {2021},
date = {2021-01-01},
journal = {Transactions of the Japanese Society for Artificial Intelligence},
volume = {36},
number = {1},
pages = {13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Risk of Injury in Moral Dilemmas With Autonomous Vehicles Journal Article
In: Front. Robot. AI, vol. 7, pp. 572529, 2021, ISSN: 2296-9144.
@article{de_melo_risk_2021,
title = {Risk of Injury in Moral Dilemmas With Autonomous Vehicles},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {https://www.frontiersin.org/articles/10.3389/frobt.2020.572529/full},
doi = {10.3389/frobt.2020.572529},
issn = {2296-9144},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-14},
journal = {Front. Robot. AI},
volume = {7},
pages = {572529},
abstract = {As autonomous machines, such as automated vehicles (AVs) and robots, become pervasive in society, they will inevitably face moral dilemmas where they must make decisions that risk injuring humans. However, prior research has framed these dilemmas in starkly simple terms, i.e., framing decisions as life and death and neglecting the influence of risk of injury to the involved parties on the outcome. Here, we focus on this gap and present experimental work that systematically studies the effect of risk of injury on the decisions people make in these dilemmas. In four experiments, participants were asked to program their AVs to either save five pedestrians, which we refer to as the utilitarian choice, or save the driver, which we refer to as the nonutilitarian choice. The results indicate that most participants made the utilitarian choice but that this choice was moderated in important ways by perceived risk to the driver and risk to the pedestrians. As a second contribution, we demonstrate the value of formulating AV moral dilemmas in a game-theoretic framework that considers the possible influence of others’ behavior. In the fourth experiment, we show that participants were more (less) likely to make the utilitarian choice, the more utilitarian (nonutilitarian) other drivers behaved; furthermore, unlike the game-theoretic prediction that decision-makers inevitably converge to nonutilitarianism, we found significant evidence of utilitarianism. We discuss theoretical implications for our understanding of human decision-making in moral dilemmas and practical guidelines for the design of autonomous machines that solve these dilemmas while, at the same time, being likely to be adopted in practice.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2018
Manuvinakurike, Ramesh; Bharadwaj, Sumanth; Georgila, Kallirroi
A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change Proceedings Article
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
@inproceedings{manuvinakurike_dialogue_2018,
title = {A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change},
author = {Ramesh Manuvinakurike and Sumanth Bharadwaj and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03948},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {A dialogue annotation scheme for weight management chat using the trans-theoretical model of health behavior change},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Khooshabeh, Peter; Amir, Ori; Gratch, Jonathan
Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 2224–2226, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
@inproceedings{de_melo_shaping_2018,
title = {Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing},
author = {Celso M. de Melo and Peter Khooshabeh and Ori Amir and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3238129},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {2224–2226},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Emotion expressions can help solve social dilemmas where individual interest is pitted against the collective interest. Building on research that shows that emotions communicate intentions to others, we reinforce that people can infer whether emotionally expressive computer agents intend to cooperate or compete. We further show important distinctions between computer agents that are perceived to be driven by humans (i.e., avatars) vs. by algorithms (i.e., agents). Our results reveal that, when the emotion expression reflects an intention to cooperate, participants will cooperate more with avatars than with agents; however, when the emotion reflects an intention to compete, participants cooperate just as little with avatars as with agents. Finally, we present first evidence that the way the dilemma is described - or framed - can influence people's decision-making. We discuss implications for the design of autonomous agents that foster cooperation with humans, beyond what game theory predicts in social dilemmas.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Monahan, Shannon; Johnson, Emmanuel; Lucas, Gale; Finch, James; Gratch, Jonathan
Autonomous Agent that Provides Automated Feedback Improves Negotiation Skills Book Section
In: Artificial Intelligence in Education, vol. 10948, pp. 225–229, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-93845-5 978-3-319-93846-2.
@incollection{monahan_autonomous_2018,
title = {Autonomous Agent that Provides Automated Feedback Improves Negotiation Skills},
author = {Shannon Monahan and Emmanuel Johnson and Gale Lucas and James Finch and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-93846-2_41},
doi = {10.1007/978-3-319-93846-2_41},
isbn = {978-3-319-93845-5 978-3-319-93846-2},
year = {2018},
date = {2018-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {10948},
pages = {225–229},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Research has found that individuals can improve their negotiation abilities by practicing with virtual agents [1, 2]. For these pedagogical agents to become more “intelligent,” the system should be able to give feedback on negotiation performance [3, 4]. In this study, we examined the impact of providing such individualized feedback. Participants first engaged in a negotiation with a virtual agent. After this negotiation, participants were either given automated individualized feedback or not. Feedback was based on negotiation principles [4], which were quantified using a validated approach [5]. Participants then completed a second, parallel negotiation. Our results show that, compared to the control condition, participants who received such feedback after the first negotiation showed a significantly greater improvement in the strength of their first offer, concession curve, and thus their final outcome in the negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Flenner, Arjuna; Fraune, Marlena R.; Hiatt, Laura M.; Kendall, Tony; Laird, John E.; Lebiere, Christian; Rosenbloom, Paul S.; Stein, Frank; Topp, Elin A.; Unhelkar, Vaibhav V.; Zhao, Ying
Reports of the AAAI 2017 Fall Symposium Series Journal Article
In: AI Magazine, vol. 38, no. 2, pp. 81–86, 2018, ISSN: 0738-4602.
@article{flenner_reports_2018,
title = {Reports of the AAAI 2017 Fall Symposium Series},
author = {Arjuna Flenner and Marlena R. Fraune and Laura M. Hiatt and Tony Kendall and John E. Laird and Christian Lebiere and Paul S. Rosenbloom and Frank Stein and Elin A. Topp and Vaibhav V. Unhelkar and Ying Zhao},
url = {https://www.aaai.org/ojs/index.php/aimagazine/article/view/2813},
doi = {10.1609/aimag.v38i2.2813},
issn = {0738-4602},
year = {2018},
date = {2018-06-01},
journal = {AI Magazine},
volume = {38},
number = {2},
pages = {81–86},
abstract = {The AAAI 2017 Fall Symposium Series was held Thursday through Saturday, November 9-11, at the Westin Arlington Gateway in Arlington, Virginia, adjacent to Washington, DC. The titles of the six symposia were Artificial Intelligence for Human-Robot Interaction; Cognitive Assistance in Government and Public Sector Applications; Deep Models and Artificial Intelligence for Military Applications: Potentials, Theories, Practices, Tools, and Risks; Human-Agent Groups: Studies, Algorithms, and Challenges; Natural Communication for Human-Robot Collaboration; and A Standard Model of the Mind. The highlights of each symposium (except the Natural Communication for Human-Robot Collaboration symposium, whose organizers did not submit a report) are presented in this report.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Schwartz, David; Goldberg, Stephen L.
An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger Proceedings Article
In: Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE), pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@inproceedings{wang_analysis_2018,
title = {An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and David Schwartz and Stephen L. Goldberg},
url = {http://ceur-ws.org/Vol-2141/paper3.pdf},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE)},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Using a low-cost and high-speed computer graphics and character animation technology, we created digital doppelgangers of students and placed them in a learning-byexplaining task where they interacted with digital doppelgangers of themselves. We investigate the research question of how does increasing the similarity of the physical appearance between the agent and the student impact learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual human listener in a learning-by-explaining paradigm. It presents an analysis of how students’ perceptions of the resemblance impact their learning experience and outcomes. The analysis and results offer insight into the promise and limitation of the application of this novel technology to pedagogical agents research.},
keywords = {ARL, DoD, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Karumbaiah, Shamya; Tokel, S. Tugba; Core, Mark G.; Stratou, Giota; Auerbach, Daniel; Georgila, Kallirroi
Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 352–366, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93842-4 978-3-319-93843-1.
@inproceedings{nye_engaging_2018,
title = {Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System},
author = {Benjamin D. Nye and Shamya Karumbaiah and S. Tugba Tokel and Mark G. Core and Giota Stratou and Daniel Auerbach and Kallirroi Georgila},
url = {http://link.springer.com/10.1007/978-3-319-93843-1_26},
doi = {10.1007/978-3-319-93843-1_26},
isbn = {978-3-319-93842-4 978-3-319-93843-1},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10947},
pages = {352–366},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Facial expression trackers output measures for facial action units (AUs), and are increasingly being used in learning technologies. In this paper, we compile patterns of AUs seen in related work as well as use factor analysis to search for categories implicit in our corpus. Although there was some overlap between the factors in our data and previous work, we also identified factors seen in the broader literature but not previously reported in the context of learning environments. In a correlational analysis, we found evidence for relationships between factors and self-reported traits such as academic effort, study habits, and interest in the subject. In addition, we saw differences in average levels of factors between a video watching activity, and a decision making activity. However, in this analysis, we were not able to isolate any facial expressions having a significant positive or negative relationship with either learning gain, or performance once question difficulty and related factors were also considered. Given the overall low levels of facial affect in the corpus, further research will explore different populations and learning tasks to test the possible hypothesis that learners may have been in a pattern of “Over-Flow” in which they were engaged with the system, but not deeply thinking about the content or their errors.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hampton, Andrew J.; Nye, Benjamin D.; Pavlik, Philip I.; Swartout, William R.; Graesser, Arthur C.; Gunderson, Joseph
Mitigating Knowledge Decay from Instruction with Voluntary Use of an Adaptive Learning System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 119–133, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93845-5 978-3-319-93846-2.
@inproceedings{hampton_mitigating_2018,
title = {Mitigating Knowledge Decay from Instruction with Voluntary Use of an Adaptive Learning System},
author = {Andrew J. Hampton and Benjamin D. Nye and Philip I. Pavlik and William R. Swartout and Arthur C. Graesser and Joseph Gunderson},
url = {http://link.springer.com/10.1007/978-3-319-93846-2_23},
doi = {10.1007/978-3-319-93846-2_23},
isbn = {978-3-319-93845-5 978-3-319-93846-2},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10948},
pages = {119–133},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Knowledge decays across breaks in instruction. Learners lack the metacognition to self-assess their knowledge decay and effectively self-direct review, as well as lacking interactive exercises appropriate to their individual knowledge level. Adaptive learning systems offer the potential to mitigate these issues, by providing open learner models to facilitate learner’s understanding of their knowledge levels and by presenting personalized practice exercises. The current study analyzes differences in knowledge decay between learners randomly assigned to an intervention where they could use an adaptive system during a long gap between courses, compared with a control condition. The experimental condition used the Personal Assistant for Life-Long Learning (PAL3), a tablet-based adaptive learning system integrating multiple intelligent tutoring systems and conventional learning resources. It contained electronics content relevant to the experiment participants, Navy sailors who graduated from apprentice electronics courses (A-School) awaiting assignment to their next training (C-School). The study was conducted over one month, collecting performance data with a counterbalanced pre-, mid-, and post-test. The control condition exhibited the expected decay. The PAL3 condition showed a significant difference from the control, with no significant knowledge decay in their overall knowledge, despite substantial variance in usage for PAL3 (e.g., most of overall use in the first week, with fewer participants engaging as time went on). Interestingly, while overall decay was mitigated in PAL3, this result was primarily through gains in some knowledge offsetting losses in other knowledge. Overall, these results indicate that adaptive study tools can help prevent knowledge decay, even with voluntary usage.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Merchant, Chirag; Schwartz, David; Goldberg, Stephen L.
Learning by Explaining to a Digital Doppelganger Book Section
In: Intelligent Tutoring Systems, vol. 10858, pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@incollection{wang_learning_2018,
title = {Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and Chirag Merchant and David Schwartz and Stephen L. Goldberg},
url = {http://link.springer.com/10.1007/978-3-319-91464-0_25},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-05-01},
booktitle = {Intelligent Tutoring Systems},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. An emerging computer animation technology makes the creation of digital doppelgangers an accessible reality. This allows researchers in pedagogical agents to explore previously unexplorable research questions, such as how does increasing the similarity in appearance between the agent and the student impact learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual listener in a learning-by-explaining paradigm. Results offer insight into the promise and limitation of this novel technology.},
keywords = {ARL, DoD, MedVR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Bonial, Claire; Lukin, Stephanie M.; Foots, Ashley; Henry, Cassidy; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human-Robot Dialogue and Collaboration in Search and Navigation Proceedings Article
In: Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions, AREA 2018, Miyazaki, Japan, 2018.
@inproceedings{bonial_human-robot_2018,
title = {Human-Robot Dialogue and Collaboration in Search and Navigation},
author = {Claire Bonial and Stephanie M. Lukin and Ashley Foots and Cassidy Henry and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {http://www.areaworkshop.org/wp-content/uploads/2018/05/4.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions},
publisher = {AREA 2018},
address = {Miyazaki, Japan},
abstract = {Collaboration with a remotely located robot in tasks such as disaster relief and search and rescue can be facilitated by grounding natural language task instructions into actions executable by the robot in its current physical context. The corpus we describe here provides insight into the translation and interpretation a natural language instruction undergoes starting from verbal human intent, to understanding and processing, and ultimately, to robot execution. We use a ‘Wizard-of-Oz’ methodology to elicit the corpus data in which a participant speaks freely to instruct a robot on what to do and where to move through a remote environment to accomplish collaborative search and navigation tasks. This data offers the potential for exploring and evaluating action models by connecting natural language instructions to execution by a physical robot (controlled by a human ‘wizard’). In this paper, a description of the corpus (soon to be openly available) and examples of actions in the dialogue are provided.},
keywords = {ARL, DoD, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Pincus, Eli; Artstein, Ron
Chahta Anumpa: A Multimodal Corpus of the Choctaw Language Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 3371–3376, ELRA, Miyazaki, Japan, 2018.
@inproceedings{brixey_chahta_2018,
title = {Chahta Anumpa: A Multimodal Corpus of the Choctaw Language},
author = {Jacqueline Brixey and Eli Pincus and Ron Artstein},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/822.html},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {3371–3376},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {This paper presents a general use corpus for the Native American indigenous language Choctaw. The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for the threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Henry, Cassidy; Lukin, Stephanie; Artstein, Ron; Gervits, Felix; Pollard, Kim; Bonial, Claire; Lei, Su; Voss, Clare R.; Marge, Matthew; Hayes, Cory J.; Hill, Susan G.
Dialogue Structure Annotation for Multi-Floor Interaction Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 104–111, ELRA, Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
@inproceedings{traum_dialogue_2018,
title = {Dialogue Structure Annotation for Multi-Floor Interaction},
author = {David Traum and Cassidy Henry and Stephanie Lukin and Ron Artstein and Felix Gervits and Kim Pollard and Claire Bonial and Su Lei and Clare R. Voss and Matthew Marge and Cory J. Hayes and Susan G. Hill},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/672.html},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {104–111},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {We present an annotation scheme for meso-level dialogue structure, specifically designed for multi-floor dialogue. The scheme includes a transaction unit that clusters utterances from multiple participants and floors into units according to realization of an initiator’s intent, and relations between individual utterances within the unit. We apply this scheme to annotate a corpus of multi-floor human-robot interaction dialogues. We examine the patterns of structure observed in these dialogues and present inter-annotator statistics and relative frequencies of types of relations and transaction units. Finally, some example applications of these annotations are introduced.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Kim, Doo Soon; Artstein, Ron; Georgila, Kallirroi
Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing Proceedings Article
In: Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC), LREC, Miyazaki, Japan, 2018.
@inproceedings{manuvinakurike_edit_2018,
title = {Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing},
author = {Ramesh Manuvinakurike and Jacqueline Brixey and Trung Bui and Walter Chang and Doo Soon Kim and Ron Artstein and Kallirroi Georgila},
url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/481.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC)},
publisher = {LREC},
address = {Miyazaki, Japan},
abstract = {This paper introduces the task of interacting with an image editing program through natural language. We present a corpus of image edit requests which were elicited for real world images, and an annotation framework for understanding such natural language instructions and mapping them to actionable computer commands. Finally, we evaluate crowd-sourced annotation as a means of efficiently creating a sizable corpus at a reasonable cost.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stocco, Andrea; Laird, John; Lebiere, Christian; Rosenbloom, Paul
Empirical Evidence from Neuroimaging Data for a Standard Model of the Mind Proceedings Article
In: Proceedings of the 40th Annual Meeting of the Cognitive Science Society, Cognitive Science Society, Madison, WI, 2018.
@inproceedings{stocco_empirical_2018,
title = {Empirical Evidence from Neuroimaging Data for a Standard Model of the Mind},
author = {Andrea Stocco and John Laird and Christian Lebiere and Paul Rosenbloom},
url = {https://www.researchgate.net/publication/325106544_Empirical_Evidence_from_Neuroimaging_Data_for_a_Standard_Model_of_the_Mind},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 40th Annual Meeting of the Cognitive Science Society},
publisher = {Cognitive Science Society},
address = {Madison, WI},
abstract = {In a recent paper, Laird, Lebiere, and Rosenbloom (2017) highlight how 40 years of research on cognitive architectures has begun to yield a dramatic convergence of different approaches towards a set of basic assumptions that they called the “Standard Model of the Mind” (SMM), in analogy to the Standard Model of particle physics. The SMM was designed to capture a consensus view of “human-like minds”, whether from AI or cognitive science, which if valid must also be true of the human brain. Here, we provide a preliminary test of this hypothesis based on a re-analysis of fMRI data from four tasks that span a wide range of cognitive functions and cognitive complexity, and are representative of the specific form of intelligence and flexibility that is associated with higher-level human cognition. Using an established method (Dynamic Causal Modeling) to examine functional connectivity between brain regions, the SMM was compared against two alternative models that violate either functional or structural assumptions of the SMM. The results show that, in every dataset, the SMM significantly outperforms the other models, suggesting that the SMM best captures the functional requirements of brain dynamics in fMRI data among these alternatives.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Choi, Hyungtak; Boberg, Jill; Jeon, Heesik; Traum, David
Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS), IWSDS, Singapore, 2018.
@inproceedings{georgila_toward_2018,
title = {Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues},
author = {Kallirroi Georgila and Carla Gordon and Hyungtak Choi and Jill Boberg and Heesik Jeon and David Traum},
url = {http://www.colips.org/conferences/iwsds2018/wp/wp-content/uploads/2018/03/IWSDS-2018_paper_18.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS)},
publisher = {IWSDS},
address = {Singapore},
abstract = {We analyze a corpus of system-user dialogues in the Internet of Things domain. Our corpus is automatically, semi-automatically, and manually annotated with a variety of features both on the utterance level and the full dialogue level. The corpus also includes human ratings of dialogue quality collected via crowdsourcing. We calculate correlations between features and human ratings to identify which features are highly associated with human perceptions about dialogue quality in this domain. We also perform linear regression and derive a variety of dialogue quality evaluation functions. These evaluation functions are then applied to a heldout portion of our corpus, and are shown to be highly predictive of human ratings and outperform standard reward-based evaluation functions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiao, Gang; Georgila, Kallirroi
A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue Proceedings Article
In: Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31), AAAI, Melbourne, FL, 2018.
@inproceedings{xiao_comparison_2018,
title = {A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue},
author = {Gang Xiao and Kallirroi Georgila},
url = {https://aaai.org/ocs/index.php/FLAIRS/FLAIRS18/paper/view/17687},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31)},
publisher = {AAAI},
address = {Melbourne, FL},
abstract = {We use reinforcement learning to learn dialogue policies in a collaborative furniture layout negotiation task. We employ a variety of methodologies (i.e., learning against a simulated user versus co-learning) and algorithms. Our policies achieve the best solution or a good solution to this problem for a variety of settings and initial conditions, including in the presence of noise (e.g., due to speech recognition or natural language understanding errors). Also, our policies perform well even in situations not observed during training. Policies trained against a simulated user perform well while interacting with policies trained through co-learning, and vice versa. Furthermore, policies trained in a two-party setting are successfully applied to a three-party setting, and vice versa.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
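The sketch below is a toy illustration, in the spirit of the abstract above, of training a dialogue policy with tabular Q-learning against a simulated user. The states, actions, reward values, and the `simulated_user` function are invented; the paper's actual task and algorithms may differ.
```python
# Toy sketch: tabular Q-learning of a negotiation dialogue policy against a
# hypothetical user simulator. State/action spaces and rewards are invented.
import random

actions = ["offer", "counter_offer", "accept", "reject"]
Q = {}  # maps (state, action) -> estimated value

def simulated_user(state, action):
    """Hypothetical user simulator: returns (next_state, reward, done)."""
    if action == "accept":
        return state, 1.0, True
    if action == "reject":
        return state, -1.0, True
    return min(state + 1, 5), -0.05, False  # small cost for each extra turn

alpha, gamma, epsilon = 0.1, 0.95, 0.2
for _ in range(5000):
    state, done = 0, False
    while not done:
        if random.random() < epsilon:
            action = random.choice(actions)          # explore
        else:
            action = max(actions, key=lambda a: Q.get((state, a), 0.0))  # exploit
        next_state, reward, done = simulated_user(state, action)
        best_next = max(Q.get((next_state, a), 0.0) for a in actions)
        q = Q.get((state, action), 0.0)
        Q[(state, action)] = q + alpha * (reward + gamma * best_next * (not done) - q)
        state = next_state
```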
Artstein, Ron; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Lucas, Gale; Traum, David
The Niki and Julie Corpus: Collaborative Multimodal Dialogues between Humans, Robots, and Virtual Agents Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), European Language Resources Association (ELRA), Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{artstein_niki_2018,
title = {The Niki and Julie Corpus: Collaborative Multimodal Dialogues between Humans, Robots, and Virtual Agents},
author = {Ron Artstein and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Gale Lucas and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/482.pdf},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
publisher = {European Language Resources Association (ELRA)},
address = {Miyazaki, Japan},
abstract = {The Niki and Julie corpus contains more than 600 dialogues between human participants and a human-controlled robot or virtual agent, engaged in a series of collaborative item-ranking tasks designed to measure influence. Some of the dialogues contain deliberate conversational errors by the robot, designed to simulate the kinds of conversational breakdown that are typical of present-day automated agents. Data collected include audio and video recordings, the results of the ranking tasks, and questionnaire responses; some of the recordings have been transcribed and annotated for verbal and nonverbal feedback. The corpus has been used to study influence and grounding in dialogue. All the dialogues are in American English.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Vinkemeier, Doratha; Valstar, Michel; Gratch, Jonathan
Predicting Folds in Poker Using Action Unit Detectors and Decision Trees Proceedings Article
In: Proceedings of the 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 504–511, IEEE, Xi'an, China, 2018, ISBN: 978-1-5386-2335-0.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{vinkemeier_predicting_2018,
title = {Predicting Folds in Poker Using Action Unit Detectors and Decision Trees},
author = {Doratha Vinkemeier and Michel Valstar and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/8373874/},
doi = {10.1109/FG.2018.00081},
isbn = {978-1-5386-2335-0},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)},
pages = {504–511},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Predicting how a person will respond can be very useful, for instance when designing a strategy for negotiations. We investigate whether it is possible for machine learning and computer vision techniques to recognize a person's intentions and predict their actions based on their visually expressive behaviour, where in this paper we focus on the face. We have chosen as our setting pairs of humans playing a simplified version of poker, where the players are behaving naturally and spontaneously, albeit mediated through a computer connection. In particular, we ask if we can automatically predict whether a player is going to fold or not. We also try to answer the question of at what time point the signal for predicting if a player will fold is strongest. We use state-of-the-art FACS Action Unit detectors to automatically annotate the players' facial expressions, which have been recorded on video. In addition, we use timestamps of when the player received their card and when they placed their bets, as well as the amounts they bet. Thus, the system is fully automated. We are able to predict whether a person will fold or not significantly better than chance based solely on their expressive behaviour starting three seconds before they fold.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
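A hedged sketch of the modeling setup in the abstract above: a decision tree over Action Unit features predicting fold vs. not-fold, evaluated against chance. The feature matrix and labels below are synthetic placeholders, not data from the poker corpus.
```python
# Sketch: decision tree on automatically detected AU intensities (plus bet amount)
# in the window before a decision, predicting fold (1) vs. not-fold (0).
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(1)
n_hands = 300
# Hypothetical features: mean intensity of a handful of AUs over a 3-second
# window, plus the bet amount for that hand (all placeholder values).
X = rng.random((n_hands, 6))
y = (X[:, 0] + 0.5 * X[:, 5] + rng.normal(scale=0.3, size=n_hands) > 0.8).astype(int)

clf = DecisionTreeClassifier(max_depth=4, random_state=0)
scores = cross_val_score(clf, X, y, cv=5)
chance = max(y.mean(), 1 - y.mean())
print(f"Cross-validated accuracy: {scores.mean():.2f} (chance ~= {chance:.2f})")
```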
Wauck, Helen; Lucas, Gale; Shapiro, Ari; Feng, Andrew; Boberg, Jill; Gratch, Jonathan
Analyzing the Effect of Avatar Self-Similarity on Men and Women in a Search and Rescue Game Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 1–12, ACM Press, Montreal, Canada, 2018, ISBN: 978-1-4503-5620-6.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{wauck_analyzing_2018,
title = {Analyzing the Effect of Avatar Self-Similarity on Men and Women in a Search and Rescue Game},
author = {Helen Wauck and Gale Lucas and Ari Shapiro and Andrew Feng and Jill Boberg and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3173574.3174059},
doi = {10.1145/3173574.3174059},
isbn = {978-1-4503-5620-6},
year = {2018},
date = {2018-04-01},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {1–12},
publisher = {ACM Press},
address = {Montreal, Canada},
abstract = {A crucial aspect of virtual gaming experiences is the avatar: the player's virtual self-representation. While research has demonstrated benefits to using self-similar avatars in some virtual experiences, such avatars sometimes produce a more negative experience for women. To help researchers and game designers assess the cost-benefit tradeoffs of self-similar avatars, we compared players' performance and subjective experience in a search and rescue computer game when using two different photorealistic avatars: their own self or a friend, and when playing either a social (rescuing people) or a nonsocial (rescuing gems) version of the game. There was no effect of avatar appearance on players' performance or subjective experience in either game version, but we also found that women's experience with self-similar avatars was no more negative than men's. Our results suggest that avatar appearance may not make a difference to players in certain game contexts.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pincus, Eli; Lei, Su; Lucas, Gale; Johnson, Emmanuel; Tsang, Michael; Gratch, Jonathan; Traum, David
The Importance of Regulatory Fit & Early Success in a Human-Machine Game Proceedings Article
In: Proceedings of the first APA ACM Technology, Mind and Society Conference, pp. 1–6, ACM Press, Washington D.C., 2018, ISBN: 978-1-4503-5420-2.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{pincus_importance_2018,
title = {The Importance of Regulatory Fit & Early Success in a Human-Machine Game},
author = {Eli Pincus and Su Lei and Gale Lucas and Emmanuel Johnson and Michael Tsang and Jonathan Gratch and David Traum},
url = {http://dl.acm.org/citation.cfm?doid=3183654.3183661},
doi = {10.1145/3183654.3183661},
isbn = {978-1-4503-5420-2},
year = {2018},
date = {2018-04-01},
booktitle = {Proceedings of the first APA ACM Technology, Mind and Society Conference},
pages = {1–6},
publisher = {ACM Press},
address = {Washington D.C.},
abstract = {In this paper, we explore the potential of regulatory focus theory as a framework for personalizing human-machine interactions. We manipulate framing (gain or loss) of a collaborative word-guessing game where a fully-automated virtual human gives clues. Consistent with previous work on regulatory focus, we find evidence of significantly higher perceived task-success when participants have regulatory fit. Inconsistent with previous work, however, fit did not increase task-enjoyment (nor performance). Participants with gain framing had marginally higher enjoyment, regardless of their regulatory focus. We operationalize motivation by number of optional rounds played but failed to find a "fit" effect. Instead, players who achieved early success (scoring more points in initial rounds) were more motivated. Early success was significantly correlated with number of optional rounds played. This finding calls to attention the need for the literature to more thoroughly investigate the relationship between success-timing and total player playtime in the game.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
D’Mello, Sidney; Kappas, Arvid; Gratch, Jonathan
The Affective Computing Approach to Affect Measurement Journal Article
In: Emotion Review, vol. 10, no. 2, pp. 174–183, 2018.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{dmello_affective_2018,
title = {The Affective Computing Approach to Affect Measurement},
author = {Sidney D’Mello and Arvid Kappas and Jonathan Gratch},
url = {http://journals.sagepub.com/doi/abs/10.1177/1754073917696583},
doi = {10.1177/1754073917696583},
year = {2018},
date = {2018-04-01},
journal = {Emotion Review},
volume = {10},
number = {2},
pages = {174–183},
abstract = {Affective computing (AC) adopts a computational approach to study affect. We highlight the AC approach towards automated affect measures that jointly model machine-readable physiological/behavioral signals with affect estimates as reported by humans or experimentally elicited. We describe the conceptual and computational foundations of the approach followed by two case studies: one on discrimination between genuine and faked expressions of pain in the lab, and the second on measuring nonbasic affect in the wild. We discuss applications of the measures, analyze measurement accuracy and generalizability, and highlight advances afforded by computational tipping points, such as big data, wearable sensing, crowdsourcing, and deep learning. We conclude by advocating for increasing synergies between AC and affective science and offer suggestions toward that direction.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Scassellati, Brian; Shapiro, Ari; Traum, David; Petitto, Laura-Ann; Brawer, Jake; Tsui, Katherine; Gilani, Setareh Nasihati; Malzkuhn, Melissa; Manini, Barbara; Stone, Adam; Kartheiser, Geo; Merla, Arcangelo
Teaching Language to Deaf Infants with a Robot and a Virtual Human Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 1–13, ACM Press, Montreal, Canada, 2018, ISBN: 978-1-4503-5620-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{scassellati_teaching_2018,
title = {Teaching Language to Deaf Infants with a Robot and a Virtual Human},
author = {Brian Scassellati and Ari Shapiro and David Traum and Laura-Ann Petitto and Jake Brawer and Katherine Tsui and Setareh Nasihati Gilani and Melissa Malzkuhn and Barbara Manini and Adam Stone and Geo Kartheiser and Arcangelo Merla},
url = {http://dl.acm.org/citation.cfm?doid=3173574.3174127},
doi = {10.1145/3173574.3174127},
isbn = {978-1-4503-5620-6},
year = {2018},
date = {2018-04-01},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {1–13},
publisher = {ACM Press},
address = {Montreal, Canada},
abstract = {Children with insufficient exposure to language during critical developmental periods in infancy are at risk for cognitive, language, and social deficits [55]. This is especially difficult for deaf infants, as more than 90% are born to hearing parents with little sign language experience [48]. We created an integrated multi-agent system involving a robot and virtual human designed to augment language exposure for 6-12 month old infants. Human-machine design for infants is challenging, as most screen-based media are unlikely to support learning in [33]. While presently, robots are incapable of the dexterity and expressiveness required for signing, even if it existed, developmental questions remain about the capacity for language from artificial agents to engage infants. Here we engineered the robot and avatar to provide visual language to effect socially contingent human conversational exchange. We demonstrate the successful engagement of our technology through case studies of deaf and hearing infants.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jonathan; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Getting to Know Each Other: The Role of Social Dialogue in Recovery from Errors in Social Robots Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, pp. 344–351, ACM Press, Chicago, IL, 2018, ISBN: 978-1-4503-4953-6.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_getting_2018,
title = {Getting to Know Each Other: The Role of Social Dialogue in Recovery from Errors in Social Robots},
author = {Gale M. Lucas and Jill Boberg and David Traum and Ron Artstein and Jonathan Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=3171221.3171258},
doi = {10.1145/3171221.3171258},
isbn = {978-1-4503-4953-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
pages = {344–351},
publisher = {ACM Press},
address = {Chicago, IL},
abstract = {This work explores the extent to which social dialogue can mitigate (or exacerbate) the loss of trust caused when robots make conversational errors. Our study uses a NAO robot programmed to persuade users to agree with its rankings on two tasks. We perform two manipulations: (1) The timing of conversational errors - the robot exhibited errors either in the first task, the second task, or neither; (2) The presence of social dialogue - between the two tasks, users either engaged in a social dialogue with the robot or completed a control task. We found that the timing of the errors matters: replicating previous research, conversational errors reduce the robot's influence in the second task, but not on the first task. Social dialogue interacts with the timing of errors, acting as an intensifier: social dialogue helps the robot recover from prior errors, and actually boosts subsequent influence; but social dialogue backfires if it is followed by errors, because it extends the period of good performance, creating a stronger contrast effect with the subsequent errors. The design of social robots should therefore be more careful to avoid errors after periods of good performance than early on in a dialogue.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Lucas, Gale
Virtual Human Role Players for Studying Social Factors in Organizational Decision Making Journal Article
In: Frontiers in Psychology, vol. 9, 2018, ISSN: 1664-1078.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{khooshabeh_virtual_2018,
title = {Virtual Human Role Players for Studying Social Factors in Organizational Decision Making},
author = {Peter Khooshabeh and Gale Lucas},
url = {http://journal.frontiersin.org/article/10.3389/fpsyg.2018.00194/full},
doi = {10.3389/fpsyg.2018.00194},
issn = {1664-1078},
year = {2018},
date = {2018-03-01},
journal = {Frontiers in Psychology},
volume = {9},
abstract = {The cyber domain of military operations presents many challenges. A unique element is the social dynamic between cyber operators and their leadership because of the novel subject matter expertise involved in conducting technical cyber tasks, so there will be situations where senior leaders might have much less domain knowledge or no experience at all relative to the warfighters who report to them. Nonetheless, it will be important for junior cyber operators to convey convincing information relevant to a mission in order to persuade or influence a leader to make informed decisions. The power dynamic will make it difficult for the junior cyber operator to successfully influence a higher ranking leader. Here we present a perspective with a sketch for research paradigm(s) to study how different factors (normative vs. informational social influence, degree of transparency, and perceived appropriateness of making suggestions) might interact with differential social power dynamics of individuals in cyber decision-making contexts. Finally, we contextualize this theoretical perspective for the research paradigms in viable training technologies.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Krämer, Nicole C.; Lucas, Gale; Schmitt, Lea; Gratch, Jonathan
Social snacking with a virtual agent – On the interrelation of need to belong and effects of social responsiveness when interacting with artificial entities Journal Article
In: International Journal of Human-Computer Studies, vol. 109, pp. 112–121, 2018, ISSN: 10715819.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{kramer_social_2018,
title = {Social snacking with a virtual agent – On the interrelation of need to belong and effects of social responsiveness when interacting with artificial entities},
author = {Nicole C. Krämer and Gale Lucas and Lea Schmitt and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581917301271},
doi = {10.1016/j.ijhcs.2017.09.001},
issn = {10715819},
year = {2018},
date = {2018-01-01},
journal = {International Journal of Human-Computer Studies},
volume = {109},
pages = {112--121},
abstract = {Based on considerations that people's need to belong can be temporarily satisfied by “social snacking” (Gardner et al., 2005) in the sense that in absence of social interactions which adequately satisfy belongingness needs surrogates can bridge lonely times, it was tested whether the interaction with a virtual agent can serve to ease the need for social contact. In a between subjects experimental setting, 79 participants interacted with a virtual agent who either displayed socially responsive nonverbal behavior or not. Results demonstrate that although there was no main effect of socially responsive behavior on participants' subjective experience of rapport and on connectedness with the agent, those people with a high need to belong reported less willingness to engage in social activities after the interaction with a virtual agent – but only if the agent displayed socially responsive behavior.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2017
Rizzo, Albert; Roy, Michael J.; Hartholt, Arno; Costanzo, Michelle; Highland, Krista Beth; Jovanovic, Tanja; Norrholm, Seth D.; Reist, Chris; Rothbaum, Barbara; Difede, JoAnn
Virtual Reality Applications for the Assessment and Treatment of PTSD Book Section
In: Handbook of Military Psychology, pp. 453–471, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-66190-2 978-3-319-66192-6.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@incollection{rizzo_virtual_2017,
title = {Virtual Reality Applications for the Assessment and Treatment of PTSD},
author = {Albert Rizzo and Michael J. Roy and Arno Hartholt and Michelle Costanzo and Krista Beth Highland and Tanja Jovanovic and Seth D. Norrholm and Chris Reist and Barbara Rothbaum and JoAnn Difede},
url = {http://link.springer.com/10.1007/978-3-319-66192-6_27},
doi = {10.1007/978-3-319-66192-6_27},
isbn = {978-3-319-66190-2 978-3-319-66192-6},
year = {2017},
date = {2017-12-01},
booktitle = {Handbook of Military Psychology},
pages = {453–471},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {War is one of the most challenging situations that a human being can encounter. The physical, emotional, cognitive, and psychological demands of a combat environment place tremendous stress on even the most well-prepared military people. It is no surprise that the stressful experiences, characteristics of operations in Iraq and Afghanistan, have produced significant numbers of service members (SMs) and veterans at risk for posttraumatic stress disorder (PTSD), as well as other psychosocial/behavioral health conditions. For example, as of June 2015, the Defense Medical Surveillance System reported 138,197 active duty SMs had been diagnosed with PTSD (Fischer, 2015). In a meta-analysis of studies published since 2001, 13.2% of infantry service members met the criteria for PTSD, with incidence rising dramatically to 25–30% in units with high levels of direct combat exposure (Kok, Herrell, Thomas, & Hoge, 2012). Moreover, as of early 2013, the prevalence of PTSD among discharged veterans receiving treatment at Veteran Affairs (VA) clinics was reported to be 29% (Fischer, 2013). These findings make a compelling case for a continued focus on developing and enhancing the availability of diverse evidence- based treatment options to address this military behavioral healthcare challenge. One emerging area of research and clinical focus is of the use of Virtual Reality (VR) simulation technology as a tool for delivering evidence-based approaches for the assessment and treatment of PTSD. Although in recent times, the popular media has lavishly reported on VR’s potential impact on all elements of our evolving digital culture, and has created the impression that VR is a novel technology, the reality is that VR is not a new concept, and many of its developmental roots are traceable to the 1980s and 1990s (Schnipper et al., 2015). Moreover, a large scientific literature has emerged over the last 20 years demonstrating the unique and added value that is accrued with the use of VR to address a wide range of clinical health conditions (Rizzo 1994; Rizzo et al., 1997; 2002; 2010; 2014; Rizzo, Cukor et al., 2015). Within that context, the present chapter will summarize the ways that researchers and clinicians have employed VR to create relevant simulations that can be applied to the assessment and treatment of PTSD.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Neubauer, Catherine; Mozgai, Sharon; Scherer, Stefan; Woolley, Joshua; Chuang, Brandon
Manual and Automatic Measures Confirm-Intranasal Oxytocin Increases Facial Expressivity Journal Article
In: Affective Computing and Intelligent Interaction, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, VHTL, Virtual Humans
@article{neubauer_manual_2017,
title = {Manual and Automatic Measures Confirm-Intranasal Oxytocin Increases Facial Expressivity},
author = {Catherine Neubauer and Sharon Mozgai and Stefan Scherer and Joshua Woolley and Brandon Chuang},
url = {https://www.researchgate.net/publication/321644417_Manual_and_Automatic_Measures_Confirm-Intranasal_Oxytocin_Increases_Facial_Expressivity?enrichId=rgreq-22efb1e32ef30cdd22e6bee2b3b63d56-XXX&enrichSource=Y292ZXJQYWdlOzMyMTY0NDQxNztBUzo1NjkwNTI4NzM4NTQ5NzZAMTUxMjY4NDE4NTcyOQ%3D%3D&el=1_x_2&_esc=publicationCoverPdf},
year = {2017},
date = {2017-12-01},
journal = {Affective Computing and Intelligent Interaction},
abstract = {The effects of oxytocin on facial emotional expressivity were investigated in individuals with schizophrenia and age-matched healthy controls during the completion of a Social Judgment Task (SJT) with a double-blind, placebo-controlled, cross-over design. Although pharmacological interventions exist to help alleviate some symptoms of schizophrenia, currently available agents are not effective at improving the severity of blunted facial affect. Participant facial expressivity was previously quantified from video recordings of the SJT using a well-validated manual approach (Facial Expression Coding System; FACES). We confirm these findings using an automated computer-based approach. Using both methods we found that the administration of oxytocin significantly increased total facial expressivity in individuals with schizophrenia and increased facial expressivity at trend level in healthy controls. Secondary analysis showed that oxytocin also significantly increased the frequency of negative valence facial expressions in individuals with schizophrenia but not in healthy controls and that oxytocin did not significantly increase positive valence facial expressions in either group. Both manual coding and automatic facial analysis revealed the same pattern of findings. Considering manual annotation can be expensive and time-consuming, these results suggest that automatic facial analysis may be an efficient and cost-effective alternative to currently utilized manual approaches and may be ready for use in clinical settings.},
keywords = {ARL, DoD, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Laird, John E.; Lebiere, Christian; Rosenbloom, Paul S.
A Standard Model of the Mind: Toward a Common Computational Framework across Artificial Intelligence, Cognitive Science, Neuroscience, and Robotics Journal Article
In: AI Magazine, vol. 38, no. 4, pp. 13, 2017, ISSN: 0738-4602.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{laird_standard_2017,
title = {A Standard Model of the Mind: Toward a Common Computational Framework across Artificial Intelligence, Cognitive Science, Neuroscience, and Robotics},
author = {John E. Laird and Christian Lebiere and Paul S. Rosenbloom},
url = {https://search.proquest.com/docview/1987347010?pq-origsite=gscholar},
doi = {10.1609/aimag.v38i4.2744},
issn = {0738-4602, 0738-4602},
year = {2017},
date = {2017-12-01},
journal = {AI Magazine},
volume = {38},
number = {4},
pages = {13},
abstract = {The purpose of this article is to begin the process of engaging the international research community in developing what can be called a standard model of the mind, where the mind we have in mind here is human-like. The notion of a standard model has its roots in physics, where over more than a half-century the international community has developed and tested a standard model that combines much of what is known about particles. This model is assumed to be internally consistent, yet still have major gaps. Its function is to serve as a cumulative reference point for the field while also driving efforts to both extend and break it.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.
Lessons from Mapping Sigma onto the Standard Model of the Mind: Self-Monitoring, Memory/Learning, and Symbols Proceedings Article
In: Proceedings of the AAAI 2017 Fall Symposium on A Standard Model of the Mind, AAAI, Arlington, VA, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_lessons_2017,
title = {Lessons from Mapping Sigma onto the Standard Model of the Mind: Self-Monitoring, Memory/Learning, and Symbols},
author = {Paul S. Rosenbloom},
url = {https://aaai.org/ocs/index.php/FSS/FSS17/paper/view/15947},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the AAAI 2017 Fall Symposium on A Standard Model of the Mind},
publisher = {AAAI},
address = {Arlington, VA},
abstract = {Sigma was one of the three architectures explicitly factored into the recent development of the standard model of the mind. Here we dig deeper into the mapping of Sigma onto the standard model begun there to explore three lessons that illustrate outstanding “issues” with the current standard model while providing food for thought for its future development.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bunt, Harry; Petukhova, Volha; Traum, David; Alexandersson, Jan
Dialogue Act Annotation with the ISO 24617-2 Standard Book Section
In: Multimodal Interaction with W3C Standards, pp. 109–135, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-42814-7 978-3-319-42816-1.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{bunt_dialogue_2017,
title = {Dialogue Act Annotation with the ISO 24617-2 Standard},
author = {Harry Bunt and Volha Petukhova and David Traum and Jan Alexandersson},
url = {http://link.springer.com/10.1007/978-3-319-42816-1_6},
isbn = {978-3-319-42814-7 978-3-319-42816-1},
year = {2017},
date = {2017-11-01},
booktitle = {Multimodal Interaction with W3C Standards},
pages = {109–135},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {This chapter describes recent and ongoing annotation efforts using the ISO 24617-2 standard for dialogue act annotation. Experimental studies are reported on the annotation by human annotators and by annotation machines of some of the specific features of the ISO annotation scheme, such as its multidimensional annotation of communicative functions, the recognition of each of its nine dimensions, and the recognition of dialogue act qualifiers for certainty, conditionality, and sentiment. The construction of corpora of dialogues, annotated according to ISO 24617-2, is discussed, including the recent DBOX and DialogBank corpora.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
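For readers unfamiliar with ISO 24617-2, the sketch below shows one possible in-memory representation of a single dialogue act annotation with a dimension, a communicative function, and the qualifiers mentioned in the chapter abstract above (certainty, conditionality, sentiment). The field names and example values are illustrative only, not an official serialization of the standard.
```python
# Minimal illustrative data structure for an ISO 24617-2 style dialogue act
# annotation. Values below are examples, not an official encoding.
from dataclasses import dataclass
from typing import Optional

@dataclass
class DialogueAct:
    speaker: str
    functional_segment: str
    dimension: str                 # e.g. "Task", "Auto-Feedback", "Turn Management"
    communicative_function: str    # e.g. "Inform", "Question", "Suggest"
    certainty: Optional[str] = None
    conditionality: Optional[str] = None
    sentiment: Optional[str] = None

act = DialogueAct(
    speaker="A",
    functional_segment="maybe we could move the couch to the corner",
    dimension="Task",
    communicative_function="Suggest",
    certainty="uncertain",
)
print(act)
```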
Neubauer, Catherine; Chollet, Mathieu; Mozgai, Sharon; Dennison, Mark; Khooshabeh, Peter; Scherer, Stefan
The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task Proceedings Article
In: Proceedings of the 19th ACM International Conference on Multimodal Interaction, pp. 426–432, ACM Press, Glasgow, UK, 2017, ISBN: 978-1-4503-5543-8.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, VHTL, Virtual Humans
@inproceedings{neubauer_relationship_2017,
title = {The relationship between task-induced stress, vocal changes, and physiological state during a dyadic team task},
author = {Catherine Neubauer and Mathieu Chollet and Sharon Mozgai and Mark Dennison and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=3136755.3136804},
doi = {10.1145/3136755.3136804},
isbn = {978-1-4503-5543-8},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the 19th ACM International Conference on Multimodal Interaction},
pages = {426–432},
publisher = {ACM Press},
address = {Glasgow, UK},
abstract = {It is commonly known that a relationship exists between the human voice and various emotional states. Past studies have demonstrated changes in a number of vocal features, such as fundamental frequency f0 and peakSlope, as a result of varying emotional state. These voice characteristics have been shown to relate to emotional load, vocal tension, and, in particular, stress. Although much research exists in the domain of voice analysis, few studies have assessed the relationship between stress and changes in the voice during a dyadic team interaction. The aim of the present study was to investigate the multimodal interplay between speech and physiology during a high-workload, high-stress team task. Specifically, we studied task-induced effects on participants' vocal signals, specifically, the f0 and peakSlope features, as well as participants' physiology, through cardiovascular measures. Further, we assessed the relationship between physiological states related to stress and changes in the speaker's voice. We recruited participants with the specific goal of working together to diffuse a simulated bomb. Half of our sample participated in an "Ice Breaker" scenario, during which they were allowed to converse and familiarize themselves with their teammate prior to the task, while the other half of the sample served as our "Control". Fundamental frequency (f0), peakSlope, physiological state, and subjective stress were measured during the task. Results indicated that f0 and peakSlope significantly increased from the beginning to the end of each task trial, and were highest in the last trial, which indicates an increase in emotional load and vocal tension. Finally, cardiovascular measures of stress indicated that the vocal and emotional load of speakers towards the end of the task mirrored a physiological state of psychological "threat".},
keywords = {ARL, DoD, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
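As a small illustration of one of the vocal features discussed in the abstract above, the sketch below extracts a fundamental frequency (f0) track from a speech recording with librosa's pYIN implementation. The file name is a placeholder, and peakSlope (a voice-quality feature typically computed with dedicated toolkits such as COVAREP) is not shown.
```python
# Sketch: extract an f0 track and its mean over voiced frames for one recording.
import numpy as np
import librosa

y, sr = librosa.load("team_task_turn.wav", sr=None)  # hypothetical recording
f0, voiced_flag, voiced_prob = librosa.pyin(
    y, fmin=librosa.note_to_hz("C2"), fmax=librosa.note_to_hz("C7"), sr=sr
)

mean_f0 = np.nanmean(f0[voiced_flag])  # average f0 over voiced frames only
print(f"Mean f0 for this trial: {mean_f0:.1f} Hz")
```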
Lucas, Gale M.; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jon; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
The Role of Social Dialogue and Errors in Robots Proceedings Article
In: Proceedings of the 5th International Conference on Human Agent Interaction, pp. 431–433, ACM Press, Bielefeld, Germany, 2017, ISBN: 978-1-4503-5113-3.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_role_2017,
title = {The Role of Social Dialogue and Errors in Robots},
author = {Gale M. Lucas and Jill Boberg and David Traum and Ron Artstein and Jon Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=3125739.3132617},
doi = {10.1145/3125739.3132617},
isbn = {978-1-4503-5113-3},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 5th International Conference on Human Agent Interaction},
pages = {431–433},
publisher = {ACM Press},
address = {Bielefeld, Germany},
abstract = {Social robots establish rapport with human users. This work explores the extent to which rapport-building can benefit (or harm) conversations with robots, and under what circumstances this occurs. For example, previous work has shown that agents that make conversational errors are less capable of influencing people than agents that do not make errors [1]. Some work has shown this effect with robots, but prior research has not considered additional factors such as the level of rapport between the person and the robot. We predicted that building rapport through a social dialogue (such as an ice-breaker) could mitigate the detrimental effect of a robot's errors on influence. Our study used a Nao robot programmed to persuade users to agree with its rankings on two "survival tasks" (e.g., lunar survival task). We manipulated both errors and social dialogue:the robot either exhibited errors in the second survival task or not, and users either engaged in an ice-breaker with the robot between the two survival tasks or completed a control task. Replicating previous research, errors tended to reduce the robot's influence in the second survival task. Contrary to our prediction, results revealed that the ice-breaker did not mitigate the effect of errors, and if anything, errors were more harmful after the ice-breaker (intended to build rapport) than in the control condition. This backfiring of attempted rapport-building may be due to a contrast effect, suggesting that the design of social robots should avoid introducing dialogues of incongruent quality.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Hoegen, Rens; Lucas, Gale; Gratch, Jonathan
Investigating Gender Differences in Temporal Dynamics during an Iterated Social Dilemma: an Automatic Analysis Using Networks Proceedings Article
In: Proceedings of the 7th International Conference on Affective Computing and Intelligent Interaction, San Antonio, TX, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{stratou_investigating_2017,
title = {Investigating Gender Differences in Temporal Dynamics during an Iterated Social Dilemma: an Automatic Analysis Using Networks},
author = {Giota Stratou and Rens Hoegen and Gale Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Investigating%20Gender%20Differences%20in%20Temporal%20Dynamics%20during%20an%20Iterated%20Social.pdf},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 7th International Conference on Affective Computing and Intelligent Interaction},
address = {San Antonio, TX},
abstract = {Emotions have a temporal nature and very often personality traits and underlying psychological conditions are hidden in the dynamics of those expressions. Within this work, we investigate the dynamics of the facial displays of dyads during an iterated social dilemma. We focus on the effect of gender and gender-pairing on those behaviors. We use networks to capture the temporal dynamics and create measures of inter- and intra- personal dependencies of emotional states. Our analysis on an iterated prisoner’s dilemma corpus suggests that there are gender differences on the transitions of the emotional states and the degree of emotional influence from the opponent.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
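As a rough sketch of the network representation mentioned in the abstract above, the code below estimates a transition matrix over discrete expression states from a time-aligned label sequence, which could then be compared across genders or gender pairings. The states and sequence are invented placeholders rather than the paper's corpus.
```python
# Sketch: build a row-normalized transition matrix from a sequence of
# per-frame expression state labels (placeholder data).
import numpy as np

states = ["neutral", "smile", "frown"]
index = {s: i for i, s in enumerate(states)}

# Hypothetical per-frame state labels for one participant.
sequence = ["neutral", "neutral", "smile", "smile", "neutral", "frown", "neutral"]

counts = np.zeros((len(states), len(states)))
for prev, nxt in zip(sequence, sequence[1:]):
    counts[index[prev], index[nxt]] += 1

# Row-normalize to get transition probabilities; rows with no outgoing
# transitions are left at zero.
row_sums = counts.sum(axis=1, keepdims=True)
transition = np.divide(counts, row_sums, out=np.zeros_like(counts), where=row_sums > 0)
print(transition)
```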
Stratou, Giota; Schalk, Job Van Der; Hoegen, Rens; Gratch, Jonathan
Refactoring Facial Expressions: an Automatic Analysis of Natural Occurring Facial Expressions in Iterative Social Dilemma Proceedings Article
In: Proceedings of the 7th International Conference on Affective Computing and Intelligent Interaction, San Antonio, TX, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{stratou_refactoring_2017,
title = {Refactoring Facial Expressions: an Automatic Analysis of Natural Occurring Facial Expressions in Iterative Social Dilemma},
author = {Giota Stratou and Job Van Der Schalk and Rens Hoegen and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Refactoring%20Facial%20Expressions-an%20Automatic%20Analysis%20of%20Natural%20Occurring%20Facial.pdf},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 7th International Conference on Affective Computing and Intelligent Interaction},
address = {San Antonio, TX},
abstract = {Many automatic facial expression recognizers now output individual facial action units (AUs), but several lines of evidence suggest that it is the combination of AUs that is psychologically meaningful: e.g., (a) constraints arising from facial morphology, (b) prior published evidence, (c) claims arising from basic emotion theory. We performed factor analysis on a large data set and recovered factors that have been discussed in the literature as psychologically meaningful. Further we show that some of these factors have external validity in that they predict participant behaviors in an iterated prisoner’s dilemma task and in fact with more precision than the individual AUs. These results both reinforce the validity of automatic recognition (as these factors would be expected from accurate AU detection) and suggest the benefits of using such factors for understanding these facial expressions as social signals.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
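A minimal sketch of the core analysis step described in the abstract above, assuming a frames-by-AUs activation matrix: factor analysis to recover a small set of expression factors. The data below are random placeholders and the number of factors is arbitrary.
```python
# Sketch: factor analysis over per-frame Action Unit activations.
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.default_rng(2)
au_matrix = rng.random((1000, 17))  # frames x AUs (placeholder data)

fa = FactorAnalysis(n_components=4, random_state=0)
factor_scores = fa.fit_transform(au_matrix)   # per-frame factor scores
loadings = fa.components_                     # factors x AUs

print(factor_scores.shape, loadings.shape)
```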
Khashe, Saba; Lucas, Gale; Becerik-Gerber, Burcin; Gratch, Jonathan
Buildings with persona: Towards effective building-occupant communication Journal Article
In: Computers in Human Behavior, vol. 75, pp. 607–618, 2017, ISSN: 07475632.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{khashe_buildings_2017,
title = {Buildings with persona: Towards effective building-occupant communication},
author = {Saba Khashe and Gale Lucas and Burcin Becerik-Gerber and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0747563217303618},
doi = {10.1016/j.chb.2017.05.040},
issn = {07475632},
year = {2017},
date = {2017-10-01},
journal = {Computers in Human Behavior},
volume = {75},
pages = {607–618},
abstract = {Occupant behavior is one of the most significant contributors to building energy consumption. Employing communication systems to enable buildings to interact with their occupants and influence the way they behave could significantly reduce energy consumption. We investigated the effectiveness of different delivery styles (i.e., avatar, voice, and text), as well as the impact of communicator’s persona (i.e., building facility manager and building itself) and gender (i.e., male and female) on occupants’ compliance with pro-environmental requests. The results showed that avatar is more effective than voice and voice is more effective than text on promoting compliance with persuasive pro-environmental requests. In addition, results showed greater compliance with requests made by the persona of a building facility manager than the persona of the building itself. Finally, participants were more likely to comply with the female communicator than the male communicator. Accordingly, this new interaction between buildings and their occupants could impact human behavior.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Narang, Sahil; Best, Andrew; Shapiro, Ari; Manocha, Dinesh
Generating Virtual Avatars with Personalized Walking Gaits using Commodity Hardware Proceedings Article
In: Proceedings of the on Thematic Workshops of ACM Multimedia 2017 - Thematic Workshops '17, pp. 219–227, ACM Press, Mountain View, California, USA, 2017, ISBN: 978-1-4503-5416-5.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narang_generating_2017,
title = {Generating Virtual Avatars with Personalized Walking Gaits using Commodity Hardware},
author = {Sahil Narang and Andrew Best and Ari Shapiro and Dinesh Manocha},
url = {http://dl.acm.org/citation.cfm?doid=3126686.3126766},
doi = {10.1145/3126686.3126766},
isbn = {978-1-4503-5416-5},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the on Thematic Workshops of ACM Multimedia 2017 - Thematic Workshops '17},
pages = {219–227},
publisher = {ACM Press},
address = {Mountain View, California, USA},
abstract = {We present a novel algorithm for generating virtual avatars which move like the represented human subject, using inexpensive sensors & commodity hardware. Our algorithm is based on a perceptual study that evaluates self-recognition and similarity of gaits rendered on virtual avatars. We identify discriminatory features of human gait and propose a data-driven synthesis algorithm that can generate a set of similar gaits from a single walker. These features are combined to automatically synthesize personalized gaits for a human user from noisy motion capture data. The overall approach is robust and can generate new gaits with little or no artistic intervention using commodity sensors in a simple laboratory setting. We demonstrate our approach's application in rapidly animating virtual avatars of new users with personalized gaits, as well as procedurally generating distinct but similar "families" of gait in virtual environments.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tsang, Michael; Korolik, Vadim; Scherer, Stefan; Matarić, Maja
Comparing models for gesture recognition of children's bullying behaviors Proceedings Article
In: Affective Computing and Intelligent Interaction (ACII), 2017 Seventh International Conference on, pp. 138–145, IEEE, San Antonio, TX, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tsang_comparing_2017,
title = {Comparing models for gesture recognition of children's bullying behaviors},
author = {Michael Tsang and Vadim Korolik and Stefan Scherer and Maja Matarić},
url = {https://ieeexplore.ieee.org/abstract/document/8273591/},
doi = {10.1109/ACII.2017.8273591},
year = {2017},
date = {2017-10-01},
booktitle = {Affective Computing and Intelligent Interaction (ACII), 2017 Seventh International Conference on},
pages = {138–145},
publisher = {IEEE},
address = {San Antonio, TX},
abstract = {We explored gesture recognition applied to the problem of classifying natural physical bullying behaviors by children. To capture natural bullying behavior data, we developed a humanoid robot that used hand-coded gesture recognition to identify basic physical bullying gestures and responded by explaining why the gestures were inappropriate. Children interacted with the robot by trying various bullying behaviors, thereby allowing us to collect a natural bullying behavior dataset for training the classifiers. We trained three different sequence classifiers using the collected data and compared their effectiveness at classifying different types of common physical bullying behaviors. Overall, Hidden Conditional Random Fields achieved the highest average F1 score (0.645) over all tested gesture classes.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
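The evaluation reported in the abstract above comes down to per-class and macro-averaged F1 over gesture classes; the snippet below shows that computation with scikit-learn, using invented gesture labels and predictions.
```python
# Sketch: per-class and macro-averaged F1 for a gesture classifier's output.
from sklearn.metrics import classification_report, f1_score

# Invented example labels (placeholder gesture classes).
y_true = ["push", "hit", "kick", "push", "hit", "kick", "push", "hit"]
y_pred = ["push", "hit", "push", "push", "hit", "kick", "kick", "hit"]

print(classification_report(y_true, y_pred))
print("Macro F1:", f1_score(y_true, y_pred, average="macro"))
```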
Ringeval, Fabien; Schuller, Björn; Valstar, Michel; Gratch, Jonathan; Cowie, Roddy; Pantic, Maja
Summary for AVEC 2017: Real-life Depression and Affect Challenge and Workshop Proceedings Article
In: Proceedings of the 2017 ACM on Multimedia Conference, pp. 1963–1964, ACM Press, Mountain View, CA, 2017, ISBN: 978-1-4503-4906-2.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{ringeval_summary_2017,
title = {Summary for AVEC 2017: Real-life Depression and Affect Challenge and Workshop},
author = {Fabien Ringeval and Björn Schuller and Michel Valstar and Jonathan Gratch and Roddy Cowie and Maja Pantic},
url = {http://dl.acm.org/citation.cfm?doid=3123266.3132049},
doi = {10.1145/3123266.3132049},
isbn = {978-1-4503-4906-2},
year = {2017},
date = {2017-10-01},
booktitle = {Proceedings of the 2017 ACM on Multimedia Conference},
pages = {1963–1964},
publisher = {ACM Press},
address = {Mountain View, CA},
abstract = {The seventh Audio-Visual Emotion Challenge and workshop AVEC 2017 was held in conjunction with ACM Multimedia'17. This year, the AVEC series addresses two distinct sub-challenges: emotion recognition and depression detection. The Affect Sub-Challenge is based on a novel dataset of human-human interactions recorded 'in-the-wild', whereas the Depression Sub-Challenge is based on the same dataset as the one used in AVEC 2016, with human-agent interactions. In this summary, we mainly describe participation and its conditions.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Krämer, Nicole C.; Lucas, Gale; Schmitt, Lea; Gratch, Jonathan
Social Snacking with a virtual agent – On the interrelation of need to belong and effects of social responsiveness when interacting with artificial entities Journal Article
In: International Journal of Human-Computer Studies, 2017, ISSN: 10715819.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{kramer_social_2017,
title = {Social Snacking with a virtual agent – On the interrelation of need to belong and effects of social responsiveness when interacting with artificial entities},
author = {Nicole C. Krämer and Gale Lucas and Lea Schmitt and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581917301271},
doi = {10.1016/j.ijhcs.2017.09.001},
issn = {10715819},
year = {2017},
date = {2017-09-01},
journal = {International Journal of Human-Computer Studies},
abstract = {Based on considerations that people's need to belong can be temporarily satisfied by “social snacking” (Gardner et al., 2005) in the sense that in absence of social interactions which adequately satisfy belongingness needs surrogates can bridge lonely times, it was tested whether the interaction with a virtual agent can serve to ease the need for social contact. In a between subjects experimental setting, 79 participants interacted with a virtual agent who either displayed socially responsive nonverbal behavior or not. Results demonstrate that although there was no main effect of socially responsive behavior on participants' subjective experience of rapport and on connectedness with the agent, those people with a high need to belong reported less willingness to engage in social activities after the interaction with a virtual agent – but only if the agent displayed socially responsive behavior.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Shapiro, Ari; Schwartz, David; Lewine, Gabrielle; Feng, Andrew Wei-Wen
Virtual Role-Play with Rapid Avatars Book Section
In: Intelligent Virtual Agents, vol. 10498, pp. 463–466, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@incollection{wang_virtual_2017,
title = {Virtual Role-Play with Rapid Avatars},
author = {Ning Wang and Ari Shapiro and David Schwartz and Gabrielle Lewine and Andrew Wei-Wen Feng},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_59},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {10498},
pages = {463–466},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers possess great potential to serve as powerful models for behavioral change. An emerging technology, the Rapid Avatar Capture and Simulation (RACAS), enables low-cost and high-speed scanning of a human user and creation of a digital doppelganger that is a fully animatable virtual 3D model of the user. We designed a virtual role-playing game, DELTA, with digital doppelgangers to influence a human user’s attitude to-wards sexism on college campuses. In this demonstration, we will showcase the RACAS system and the DELTA game.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Pincus, Eli; Traum, David
An Incremental Response Policy in an Automatic Word-Game Proceedings Article
In: Proceedings of IVA 2017 Workshop on Conversational Interruptions in Human-Agent Interactions, Stockholm, Sweden, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{pincus_incremental_2017,
title = {An Incremental Response Policy in an Automatic Word-Game},
author = {Eli Pincus and David Traum},
url = {http://people.ict.usc.edu/~traum/Papers/pincus_traum-cihai2017.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of IVA 2017 Workshop on Conversational Interruptions in Human-Agent Interactions},
address = {Stockholm, Sweden},
abstract = {Turn-taking is an important aspect of human-human and human-computer interaction. Rapid turn-taking is a feature of human-human interaction that is difficult for today’s dialogue systems to emulate. For example, typical human-human interactions can involve an original sending interlocutor changing or stopping their speech mid-utterance as a result of overlapping speech from the other interlocutor. The overlapping utterances from the other interlocutor are typically called barge-in utterances. An example of this phenomenon is seen in the two turns of dialogue in the top half of Figure 1. In this dialogue segment Student A first reveals his test score in the original utterance. Student A then begins to tell student B that he had heard Student B got a perfect score. Student B interrupts Student A with a barge-in utterance that contains new information (that actually he had not performed well on the test) causing Student A to halt his speech and not finish his original utterance. We call the unspoken part of student A’s original utterance Student A’s originally intended utterance. Student A then makes a decision based on the new information to not say his originally intended utterance. This is likely due to the originally intended utterance no longer being appropriate considering the new information made available to Student A. Student A then makes an intelligent next choice of what to say which can be seen in Student A’s updated utterance which takes into account the new information contained in Student B’s barge-in utterance. In this work we refer to Student A’s dialogue move as an intelligent update.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morales, Michelle Renee; Scherer, Stefan; Levitan, Rivka
OpenMM: An Open-Source Multimodal Feature Extraction Tool Proceedings Article
In: Proceedings of Interspeech 2017, pp. 3354–3358, ISCA, Stockholm, Sweden, 2017.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morales_openmm_2017,
title = {OpenMM: An Open-Source Multimodal Feature Extraction Tool},
author = {Michelle Renee Morales and Stefan Scherer and Rivka Levitan},
url = {https://www.researchgate.net/publication/319185055_OpenMM_An_Open-Source_Multimodal_Feature_Extraction_Tool},
doi = {10.21437/Interspeech.2017-1382},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of Interspeech 2017},
pages = {3354–3358},
publisher = {ISCA},
address = {Stockholm, Sweden},
abstract = {The primary use of speech is in face-to-face interactions and situational context and human behavior therefore intrinsically shape and affect communication. In order to usefully model situational awareness, machines must have access to the same streams of information humans have access to. In other words, we need to provide machines with features that represent each communicative modality: face and gesture, voice and speech, and language. This paper presents OpenMM: an open-source multimodal feature extraction tool. We build upon existing open-source repositories to present the first publicly available tool for multimodal feature extraction. The tool provides a pipeline for researchers to easily extract visual and acoustic features. In addition, the tool also performs automatic speech recognition (ASR) and then uses the transcripts to extract linguistic features. We evaluate the OpenMM’s multimodal feature set on deception, depression and sentiment classification tasks and show its performance is very promising. This tool provides researchers with a simple way of extracting multimodal features and consequently a richer and more robust feature representation for machine learning tasks.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
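Conceptually, the pipeline described in the abstract above concatenates one feature vector per modality; the sketch below illustrates that shape of interface with placeholder extractors. It is not the actual OpenMM code or API.
```python
# Conceptual sketch of a multimodal feature pipeline: one feature vector per
# modality, concatenated for a downstream classifier. All bodies are placeholders.
import numpy as np

def visual_features(video_path: str) -> np.ndarray:
    return np.zeros(17)   # placeholder for per-video AU statistics

def acoustic_features(audio_path: str) -> np.ndarray:
    return np.zeros(88)   # placeholder for an acoustic feature set

def linguistic_features(transcript: str) -> np.ndarray:
    return np.zeros(50)   # placeholder for transcript-derived features

def extract_multimodal(video_path: str, audio_path: str, transcript: str) -> np.ndarray:
    """Concatenate per-modality features into one representation."""
    return np.concatenate([
        visual_features(video_path),
        acoustic_features(audio_path),
        linguistic_features(transcript),
    ])

features = extract_multimodal("session.mp4", "session.wav", "i am feeling fine today")
print(features.shape)  # (155,)
```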
Kennedy, James; Leite, Iolanda; Pereira, André; Sun, Ming; Li, Boyang; Jain, Rishub; Cheng, Ricson; Pincus, Eli; Carter, Elizabeth J.; Lehman, Jill Fain
Learning and Reusing Dialog for Repeated Interactions with a Situated Social Agent Proceedings Article
In: Proceedings of the International Conference on Intelligent Virtual Agents, pp. 192–204, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kennedy_learning_2017,
title = {Learning and Reusing Dialog for Repeated Interactions with a Situated Social Agent},
author = {James Kennedy and Iolanda Leite and André Pereira and Ming Sun and Boyang Li and Rishub Jain and Ricson Cheng and Eli Pincus and Elizabeth J. Carter and Jill Fain Lehman},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_22},
doi = {10.1007/978-3-319-67401-8_22},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the International Conference on Intelligent Virtual Agents},
volume = {10498},
pages = {192–204},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {Content authoring for conversations is a limiting factor in creating verbal interactions with intelligent virtual agents. Building on techniques utilizing semi-situated learning in an incremental crowdworking pipeline, this paper introduces an embodied agent that self-authors its own dialog for social chat. In particular, the autonomous use of crowdworkers is supplemented with a generalization method that borrows and assesses the validity of dialog across conversational states. We argue that the approach offers a community-focused tailoring of dialog responses that is not available in approaches that rely solely on statistical methods across big data. We demonstrate the advantages that this can bring to interactions through data collected from 486 conversations between a situated social agent and 22 users during a 3 week long evaluation period.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Massachi, Talie; Scherer, Stefan
Racing Heart and Sweaty Palms: What Influences Users’ Self-Assessments and Physiological Signals When Interacting With Virtual Audiences? Proceedings Article
In: Proceedings of the International Conference on Intelligent Virtual Agents, pp. 83–86, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{chollet_racing_2017,
title = {Racing Heart and Sweaty Palms: What Influences Users’ Self-Assessments and Physiological Signals When Interacting With Virtual Audiences?},
author = {Mathieu Chollet and Talie Massachi and Stefan Scherer},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_9},
doi = {10.1007/978-3-319-67401-8_9},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the International Conference on Intelligent Virtual Agents},
volume = {10498},
pages = {83–86},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {In psychotherapy, virtual audiences have been shown to promote successful outcomes when used to help treat public speaking anxiety. Additionally, early experiments have shown their potential to help improve public speaking ability. However, it is still unclear to what extent certain factors, such as audience non-verbal behaviors, impact users when interacting with a virtual audience. In this paper, we design an experimental study to investigate users’ self-assessments and physiological states when interacting with a virtual audience. Our results showed that virtual audience behaviors did not influence participants’ self-assessments or physiological responses, which were instead predominantly determined by participants’ prior anxiety levels.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morales, Michelle Renee; Scherer, Stefan; Levitan, Rivka
A Cross-modal Review of Indicators for Depression Detection Systems Proceedings Article
In: Proceedings of the Fourth Workshop on Computational Linguistics and Clinical Psychology, pp. 1–12, Association for Computational Linguistics, Vancouver, Canada, 2017.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morales_cross-modal_2017,
title = {A Cross-modal Review of Indicators for Depression Detection Systems},
author = {Michelle Renee Morales and Stefan Scherer and Rivka Levitan},
url = {http://aclweb.org/anthology/W17-3101},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the Fourth Workshop on Computational Linguistics and Clinical Psychology},
pages = {1–12},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Automatic detection of depression has attracted increasing attention from researchers in psychology, computer science, linguistics, and related disciplines. As a result, promising depression detection systems have been reported. This paper surveys these efforts by presenting the first cross-modal review of depression detection systems and discusses best practices and most promising approaches to this task.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan
Human-Like Agents for Repeated Negotiation Proceedings Article
In: Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence, pp. 5189–5190, International Joint Conferences on Artificial Intelligence Organization, Melbourne, Australia, 2017, ISBN: 978-0-9992411-0-3.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_human-like_2017,
title = {Human-Like Agents for Repeated Negotiation},
author = {Johnathan Mell},
url = {https://www.ijcai.org/proceedings/2017/754},
doi = {10.24963/ijcai.2017/754},
isbn = {978-0-9992411-0-3},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence},
pages = {5189–5190},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
address = {Melbourne, Australia},
abstract = {Virtual agents have been used as tools in negotiation—from acting as mediators to manifesting as full-fledged conversational partners. Virtual agents are a powerful tool for teaching negotiation skills, but require an accurate model of human behavior to perform well both as partners and teachers. The work proposed here aims to expand the current horizon of virtual negotiating agents to utilize human-like strategies. Further, agents developed using this framework should be cognizant of the social factors influencing negotiation, including reputation effects and the implications of long-term repeated relationships. A roadmap of current efforts to develop agent platforms and future expansions is discussed.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Du, Yulun; Cai, Edward; Lu, Allen; Pincus, Eli; Traum, David; Ultes, Stefan; Rojas-Barahona, Lina M.; Gasic, Milica; Young, Steve; Eskenazi, Maxine
DialPort, Gone Live: An Update After A Year of Development Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference, Association for Computational Linguistics, Saarbruecken Germany, 2017.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{lee_dialport_2017,
title = {DialPort, Gone Live: An Update After A Year of Development},
author = {Kyusong Lee and Tiancheng Zhao and Yulun Du and Edward Cai and Allen Lu and Eli Pincus and David Traum and Stefan Ultes and Lina M. Rojas-Barahona and Milica Gasic and Steve Young and Maxine Eskenazi},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken Germany},
abstract = {DialPort collects user data for connected spoken dialog systems. At present six systems are linked to a central portal that directs the user to the applicable system and suggests systems that the user may be interested in. User data has started to flow into the system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Scherer, Stefan
Perception of Virtual Audiences Journal Article
In: IEEE Computer Graphics and Applications, vol. 37, no. 4, pp. 50–59, 2017, ISSN: 0272-1716.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chollet_perception_2017,
title = {Perception of Virtual Audiences},
author = {Mathieu Chollet and Stefan Scherer},
url = {http://ieeexplore.ieee.org/abstract/document/8013501/},
doi = {10.1109/MCG.2017.3271465},
issn = {0272-1716},
year = {2017},
date = {2017-08-01},
journal = {IEEE Computer Graphics and Applications},
volume = {37},
number = {4},
pages = {50–59},
abstract = {A growing body of evidence shows that virtual audiences are a valuable tool in the treatment of social anxiety, and recent works show that they are also useful in public-speaking training programs. However, little research has focused on how such audiences are perceived and on how the behavior of virtual audiences can be manipulated to create various types of stimuli. The authors used a crowdsourcing methodology to create a virtual audience nonverbal behavior model and, with it, created a dataset of videos with virtual audiences containing varying behaviors. Using this dataset, they investigated how virtual audiences are perceived and which factors affect this perception.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Manuvinakurike, Ramesh; DeVault, David; Georgila, Kallirroi
Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game Proceedings Article
In: Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, SIGDIAL, Saarbruecken Germany, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_using_2017,
title = {Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game},
author = {Ramesh Manuvinakurike and David DeVault and Kallirroi Georgila},
url = {http://www.manuvinakurike.com/papers/eve-2017.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue},
publisher = {SIGDIAL},
address = {Saarbruecken Germany},
abstract = {We apply Reinforcement Learning (RL) to the problem of incremental dialogue policy learning in the context of a fast-paced dialogue game. We compare the policy learned by RL with a high performance baseline policy which has been shown to perform very efficiently (nearly as well as humans) in this dialogue game. The RL policy outperforms the baseline policy in offline simulations (based on real user data). We provide a detailed comparison of the RL policy and the baseline policy, including information about how much effort and time it took to develop each one of them. We also highlight the cases where the RL policy performs better, and show that understanding the RL policy can provide valuable insights which can inform the creation of an even better rule-based policy.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Artstein, Ron
Lessons in Dialogue System Deployment Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 352–355, Association for Computational Linguistics, Saarbruecken Germany, 2017.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_lessons_2017,
title = {Lessons in Dialogue System Deployment},
author = {Anton Leuski and Ron Artstein},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {352–355},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken Germany},
abstract = {We analyze deployment of an interactive dialogue system in an environment where deep technical expertise might not be readily available. The initial version was created using a collection of research tools. We summarize a number of challenges with its deployment at two museums and describe a new system that simplifies the installation and user interface; reduces reliance on 3rd-party software; and provides a robust data collection mechanism.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Hoegen, Rens; Lan, Wei; Rusow, Joshua; Singla, Karan; Yin, Xusen; Artstein, Ron; Leuski, Anton
SHIHbot: A Facebook chatbot for Sexual Health Information on HIV/AIDS Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 370–373, Association for Computational Linguistics, Saarbruecken Germany, 2017.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{brixey_shihbot_2017,
title = {SHIHbot: A Facebook chatbot for Sexual Health Information on HIV/AIDS},
author = {Jacqueline Brixey and Rens Hoegen and Wei Lan and Joshua Rusow and Karan Singla and Xusen Yin and Ron Artstein and Anton Leuski},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {370–373},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken Germany},
abstract = {We present the implementation of an autonomous chatbot, SHIHbot, deployed on Facebook, which answers a wide variety of sexual health questions on HIV/AIDS. The chatbot's response database is compiled from professional medical and public health resources in order to provide reliable information to users. The system's backend is NPCEditor, a response selection platform trained on linked questions and answers; to our knowledge this is the first retrieval-based chatbot deployed on a large public social network.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}