Publications
2021
Gratch, Jonathan
The Promise and Peril of Automated Negotiators Journal Article
In: Negotiation Journal, vol. 37, no. 1, pp. 13–34, 2021, ISSN: 0748-4526, 1571-9979.
@article{gratch_promise_2021,
title = {The Promise and Peril of Automated Negotiators},
author = {Jonathan Gratch},
url = {https://onlinelibrary.wiley.com/doi/10.1111/nejo.12348},
doi = {10.1111/nejo.12348},
issn = {0748-4526, 1571-9979},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-14},
journal = {Negotiation Journal},
volume = {37},
number = {1},
pages = {13–34},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2020
Li, Jiaman; Kuang, Zhengfei; Zhao, Yajie; He, Mingming; Bladin, Karl; Li, Hao
Dynamic Facial Asset and Rig Generation from a Single Scan Journal Article
In: ACM Transactions on Graphics, vol. 39, no. 6, 2020.
@article{li_dynamic_2020,
title = {Dynamic Facial Asset and Rig Generation from a Single Scan},
author = {Jiaman Li and Zhengfei Kuang and Yajie Zhao and Mingming He and Karl Bladin and Hao Li},
url = {https://dl.acm.org/doi/10.1145/3414685.3417817},
doi = {10.1145/3414685.3417817},
year = {2020},
date = {2020-11-01},
journal = {ACM Transactions on Graphics},
volume = {39},
number = {6},
abstract = {The creation of high-fidelity computer-generated (CG) characters for films and games is tied with intensive manual labor, which involves the creation of comprehensive facial assets that are often captured using complex hardware. To simplify and accelerate this digitization process, we propose a framework for the automatic generation of high-quality dynamic facial models, including rigs which can be readily deployed for artists to polish. Our framework takes a single scan as input to generate a set of personalized blendshapes, dynamic textures, as well as secondary facial components (e.g., teeth and eyeballs). Based on a facial database with over 4,000 scans with pore-level details, varying expressions and identities, we adopt a self-supervised neural network to learn personalized blendshapes from a set of template expressions. We also model the joint distribution between identities and expressions, enabling the inference of a full set of personalized blendshapes with dynamic appearances from a single neutral input scan. Our generated personalized face rig assets are seamlessly compatible with professional production pipelines for facial animation and rendering. We demonstrate a highly robust and effective framework on a wide range of subjects, and showcase high-fidelity facial animations with automatically generated personalized dynamic textures.},
keywords = {ARO-Coop, Graphics},
pubstate = {published},
tppubtype = {article}
}
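The rig assets described above build on the standard linear blendshape model: a blended face is the neutral mesh plus a weighted sum of per-expression offsets. As background, here is a minimal numpy sketch of that model; the function name and toy data are ours for illustration, not the paper's code.

import numpy as np

def apply_blendshapes(neutral, blendshapes, weights):
    """Deform a neutral mesh with a linear blendshape rig.

    neutral:     (V, 3) vertex positions of the neutral face.
    blendshapes: (K, V, 3) per-expression target meshes.
    weights:     (K,) activation weights, typically in [0, 1].

    Returns neutral + sum_k w_k * (B_k - neutral) as a (V, 3) array.
    """
    deltas = blendshapes - neutral[None, :, :]  # per-expression offsets
    return neutral + np.tensordot(weights, deltas, axes=1)

# Toy example: 4 vertices, 2 expression targets.
neutral = np.zeros((4, 3))
targets = np.stack([np.full((4, 3), 0.1), np.full((4, 3), -0.05)])
blended = apply_blendshapes(neutral, targets, np.array([0.5, 1.0]))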
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Book Section
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
@incollection{brixey_masheli_2020,
title = {Masheli: A Choctaw-English bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41–50},
publisher = {Springer},
address = {Switzerland},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language-mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {ARO-Coop, Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Varied Magnitude Favor Exchange in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{mell_varied_2020,
title = {Varied Magnitude Favor Exchange in Human-Agent Negotiation},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3383652.3423866},
doi = {10.1145/3383652.3423866},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Agents that interact with humans in complex, social tasks need the ability to comprehend as well as employ common social strategies. In negotiation, there is ample evidence of such techniques being used efficaciously in human interchanges. In this work, we demonstrate a new design for socially-aware agents that employ one such technique—favor exchange—in order to gain value when playing against humans. In an online study of a robust, simulated social negotiation task, we show that these agents are effective against real human participants. In particular, we show that agents that ask for favors during the course of a repeated set of negotiations are more successful than those that do not. Additionally, previous work has demonstrated that humans can detect when agents betray them by failing to return favors that were previously promised. By contrast, this work indicates that these betrayal techniques may go largely undetected in complex scenarios.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 9789811583940, 9789811583957.
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {9789811583940, 9789811583957},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145–160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {ARO-Coop, Dialogue, Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
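The eight system profiles in this study are the full factorial crossing of the three binary conversational traits. A tiny sketch of that design follows; the concrete trait labels are our paraphrase of the abstract, not the paper's exact terms.

from itertools import product

registers = ["conversational", "formal"]   # Register
confirmations = ["implicit", "explicit"]   # Explicitness
errors = ["error-free", "error-prone"]     # Misunderstandings

profiles = [
    {"register": r, "confirmation": c, "errors": e}
    for r, c, e in product(registers, confirmations, errors)
]
assert len(profiles) == 8  # one profile per trait combination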
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.
Building preparedness in response to active shooter incidents: Results of focus group interviews Journal Article
In: International Journal of Disaster Risk Reduction, vol. 48, pp. 101617, 2020, ISSN: 2212-4209.
@article{zhu_building_2020,
title = {Building preparedness in response to active shooter incidents: Results of focus group interviews},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers},
url = {https://linkinghub.elsevier.com/retrieve/pii/S221242091931427X},
doi = {10.1016/j.ijdrr.2020.101617},
issn = {2212-4209},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Disaster Risk Reduction},
volume = {48},
pages = {101617},
abstract = {Active shooter incidents present an increasing threat to American society. Many of these incidents occur in building environments; therefore, it is important to consider design and security elements in buildings to decrease the risk of active shooter incidents. This study aims to assess current security countermeasures and identify varying considerations associated with implementing these countermeasures. Fifteen participants, with expertise and experience in a diverse collection of operational and organizational backgrounds, including security, engineering, law enforcement, emergency management and policy making, participated in three focus group interviews. The participants identified a list of countermeasures that have been used for active shooter incidents. Important determinants for the effectiveness of countermeasures include their influence on occupants’ behavior during active shooter incidents, and occupants’ and administrators’ awareness of how to use them effectively. The nature of incidents (e.g., internal vs. external threats), building type (e.g., office buildings vs. school buildings), and occupants (e.g., students of different ages) were also recognized to affect the selection of appropriate countermeasures. The nexus between emergency preparedness and normal operations, and the importance of tradeoffs such as the ones between cost, aesthetics, maintenance needs and the influence on occupants’ daily activities were also discussed. To ensure the effectiveness of countermeasures and improve safety, the participants highlighted the importance of both training and practice, for occupants and administrators (e.g., first responder teams). The interview results suggested that further study of the relationship between security countermeasures and occupants’ and administrators’ responses, as well as efficient training approaches, is needed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
@article{brixey_choco_2020,
title = {ChoCo: a multimodal corpus of the Choctaw language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated into English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {ARO-Coop, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Czyzewski, Adam; Dalton, Jeffrey; Leuski, Anton
Agent Dialogue: A Platform for Conversational Information Seeking Experimentation Proceedings Article
In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2121–2124, ACM, Virtual Event China, 2020, ISBN: 978-1-4503-8016-4.
@inproceedings{czyzewski_agent_2020,
title = {Agent Dialogue: A Platform for Conversational Information Seeking Experimentation},
author = {Adam Czyzewski and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3397271.3401397},
doi = {10.1145/3397271.3401397},
isbn = {978-1-4503-8016-4},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {2121–2124},
publisher = {ACM},
address = {Virtual Event China},
abstract = {Conversational Information Seeking (CIS) is an emerging area of Information Retrieval focused on interactive search systems. As a result, there is a need for new benchmark datasets and tools to enable their creation. In this demo we present the Agent Dialogue (AD) platform, an open-source system developed for researchers to perform Wizard-of-Oz CIS experiments. AD is a scalable cloud-native platform developed with Docker and Kubernetes with a flexible and modular micro-service architecture built on production-grade state-of-the-art open-source tools (Kubernetes, gRPC streaming, React, and Firebase). It supports varied front-ends and has the ability to interface with multiple existing agent systems, including Google Assistant and open-source search libraries. It includes support for centralized structured logging as well as offline relevance annotation.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
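One ingredient the abstract mentions, centralized structured logging, reduces to appending each dialogue turn as a structured record. A minimal JSON-lines sketch follows; the field names are our stand-in, not the AD platform's actual log schema.

import json, time

def log_turn(log_path, session_id, speaker, text):
    """Append one dialogue turn as a JSON line (structured-logging sketch)."""
    record = {"ts": time.time(), "session": session_id,
              "speaker": speaker, "text": text}
    with open(log_path, "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")

log_turn("dialogue.jsonl", "s01", "user", "find recent work on negotiation")
log_turn("dialogue.jsonl", "s01", "wizard", "Here are three recent papers.")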
Lei, Su; Stefanov, Kalin; Gratch, Jonathan
Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma Proceedings Article
In: Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp. 8, IEEE, Buenos Aires, Argentina, 2020.
@inproceedings{lei_emotion_2020,
title = {Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma},
author = {Su Lei and Kalin Stefanov and Jonathan Gratch},
url = {https://www.computer.org/csdl/proceedings-article/fg/2020/307900a770/1kecIWT5wmA},
doi = {10.1109/FG47880.2020.00123},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)},
pages = {8},
publisher = {IEEE},
address = {Buenos Aires, Argentina},
abstract = {An extensive body of research has examined how specific emotional expressions shape social perceptions and social decisions, yet recent scholarship in emotion research has raised questions about the validity of emotion as a construct. In this article, we contrast the value of measuring emotional expressions with the more general construct of expressivity (in the sense of conveying a thought or emotion through any nonverbal behavior) and develop models that can automatically extract perceived expressivity from videos. Although less extensive, a solid body of research has shown expressivity to be an important element when studying interpersonal perception, particularly in psychiatric contexts. Here we examine the role expressivity plays in predicting social perceptions and decisions in the context of a social dilemma. We show that perceivers use more than facial expressions when making judgments of expressivity and see these expressions as conveying thoughts as well as emotions (although facial expressions and emotional attributions explain most of the variance in these judgments). We next show that expressivity can be predicted with high accuracy using Lasso and random forests. Our analysis shows that features related to motion dynamics are particularly important for modeling these judgments. We also show that learned models of expressivity have value in recognizing important aspects of a social situation. First, we revisit a previously published finding which showed that smile intensity was associated with the unexpectedness of outcomes in social dilemmas; instead, we show that expressivity is a better predictor (and explanation) of this finding. Second, we provide preliminary evidence that expressivity is useful for identifying “moments of interest” in a video sequence.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
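The modeling step reported above, predicting perceived expressivity from nonverbal features with Lasso and random forests, has the rough shape of the scikit-learn sketch below. The synthetic features and ratings are placeholders, not the study's data.

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LassoCV
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 40))  # 200 clips x 40 nonverbal features (synthetic)
y = X[:, :5].sum(axis=1) + rng.normal(scale=0.5, size=200)  # mock ratings

for name, model in [("lasso", LassoCV(cv=5)),
                    ("random forest", RandomForestRegressor(n_estimators=200,
                                                            random_state=0))]:
    r2 = cross_val_score(model, X, y, cv=5, scoring="r2").mean()
    print(f"{name}: mean cross-validated R^2 = {r2:.2f}")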
Alavi, Seyed Hossein; Leuski, Anton; Traum, David
Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net? Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 735–742, European Language Resources Association, Marseille, France, 2020.
@inproceedings{alavi_which_2020,
title = {Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net?},
author = {Seyed Hossein Alavi and Anton Leuski and David Traum},
url = {https://www.aclweb.org/anthology/2020.lrec-1.92/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {735–742},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We compare two models for corpus-based selection of dialogue responses: one based on cross-language relevance, the other a cross-language LSTM model. Each model is tested on multiple corpora, collected from two different types of dialogue source material. Results show that while the LSTM model performs adequately on a very large corpus (millions of utterances), its performance is dominated by the cross-language relevance model for a more moderate-sized corpus (tens of thousands of utterances).},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
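Both systems compared here select a response from a corpus of linked question-answer pairs. For orientation only, here is a far simpler retrieval baseline in the same pattern (this is neither of the paper's models): pick the answer linked to the most similar stored question under TF-IDF.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Toy linked question-answer corpus (illustrative data only).
questions = ["what is your name", "where are you from", "what can you do"]
answers = ["I'm called Ada.", "I was built in a lab.", "I answer questions."]

vectorizer = TfidfVectorizer()
q_matrix = vectorizer.fit_transform(questions)

def select_response(utterance):
    """Return the answer linked to the most similar stored question."""
    sims = cosine_similarity(vectorizer.transform([utterance]), q_matrix)
    return answers[sims.argmax()]

print(select_response("tell me what your name is"))  # -> "I'm called Ada."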
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
@inproceedings{bonial_dialogue-amr_2020,
title = {Dialogue-AMR: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
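To make the schema concrete: standard AMR encodes only the propositional content of an utterance, while Dialogue-AMR wraps that content in a speech-act node carrying illocutionary force. The sketch below shows the general shape of such a wrapping for "Move to the door"; the frame and role names are hypothetical placeholders, not the paper's actual inventory.

# Standard AMR: propositional content only (hypothetical example).
standard_amr = """\
(m / move-01
   :ARG0 (r / robot)
   :ARG2 (d / door))"""

# Dialogue-AMR-style wrapper adding a speech act around the same content.
dialogue_amr = """\
(c / command-SA
   :ARG0 (h / human)
   :ARG1 (r / robot)
   :ARG2 (m / move-01
            :ARG0 r
            :ARG2 (d / door)))"""

print(standard_amr, dialogue_amr, sep="\n\n")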
Bellas, Alexandria; Perrin, Stefawn; Malone, Brandon; Rogers, Kaytlin; Lucas, Gale; Phillips, Elizabeth; Tossell, Chad; de Visser, Ewart
Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams Proceedings Article
In: Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS), pp. 160–163, IEEE, Charlottesville, VA, USA, 2020, ISBN: 978-1-72817-145-6.
@inproceedings{bellas_rapport_2020,
title = {Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams},
author = {Alexandria Bellas and Stefawn Perrin and Brandon Malone and Kaytlin Rogers and Gale Lucas and Elizabeth Phillips and Chad Tossell and Ewart de Visser},
url = {https://ieeexplore.ieee.org/document/9106643/},
doi = {10.1109/SIEDS49339.2020.9106643},
isbn = {978-1-72817-145-6},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS)},
pages = {160–163},
publisher = {IEEE},
address = {Charlottesville, VA, USA},
abstract = {Conflicts may arise at any time during military debriefing meetings, especially in high intensity deployed settings. When such conflicts arise, it takes time to get everyone back into a receptive state of mind so that they engage in reflective discussion rather than unproductive arguing. It has been proposed by some that the use of social robots equipped with social abilities such as emotion regulation through rapport building may help to deescalate these situations to facilitate critical operational decisions. However, in military settings, the same AI agent used in the pre-brief of a mission may not be the same one used in the debrief. The purpose of this study was to determine whether a brief rapport-building session with a social robot could create a connection between a human and a robot agent, and whether consistency in the embodiment of the robot agent was necessary for maintaining this connection once formed. We report the results of a pilot study conducted at the United States Air Force Academy which simulated a military mission (i.e., Gravity and Strike). Participants’ connection with the agent, sense of trust, and overall likeability revealed that early rapport building can be beneficial for military missions.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Zeng; Xu, Yuanlu; Lassner, Christoph; Li, Hao; Tung, Tony
ARCH: Animatable Reconstruction of Clothed Humans Proceedings Article
In: Proceedings of CVPR 2020, pp. 3090–3099, IEEE, Seattle, Washington, 2020.
@inproceedings{huang_arch_2020,
title = {ARCH: Animatable Reconstruction of Clothed Humans},
author = {Zeng Huang and Yuanlu Xu and Christoph Lassner and Hao Li and Tony Tung},
url = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d090/1m3nz4mKHzG},
doi = {10.1109/CVPR42600.2020.00316},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of CVPR 2020},
pages = {3090–3099},
publisher = {IEEE},
address = {Seattle, Washington},
abstract = {In this paper, we propose ARCH (Animatable Reconstruction of Clothed Humans), a novel end-to-end framework for accurate reconstruction of animation-ready 3D clothed humans from a monocular image. Existing approaches to digitize 3D humans struggle to handle pose variations and recover details. Also, they do not produce models that are animation ready. In contrast, ARCH is a learned pose-aware model that produces detailed 3D rigged full-body human avatars from a single unconstrained RGB image. A Semantic Space and a Semantic Deformation Field are created using a parametric 3D body estimator. They allow the transformation of 2D/3D clothed humans into a canonical space, reducing ambiguities in geometry caused by pose variations and occlusions in training data. Detailed surface geometry and appearance are learned using an implicit function representation with spatial local features. Furthermore, we propose additional per-pixel supervision on the 3D reconstruction using opacity-aware differentiable rendering. Our experiments indicate that ARCH increases the fidelity of the reconstructed humans. We obtain more than 50% lower reconstruction errors for standard metrics compared to state-of-the-art methods on public datasets. We also show numerous qualitative examples of animated, high-quality reconstructed avatars unseen in the literature so far.},
keywords = {ARO-Coop, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
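At the core of the implicit function representation mentioned above is a network that maps a 3D query point (plus local features) to an occupancy value. A generic PyTorch sketch of that idea follows; the layer sizes and feature dimension are our choices, not ARCH's architecture.

import torch
import torch.nn as nn

class OccupancyMLP(nn.Module):
    """Toy implicit-surface net: (3D point, local feature) -> occupancy."""

    def __init__(self, feat_dim=32, hidden=128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(3 + feat_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1), nn.Sigmoid(),  # occupancy in (0, 1)
        )

    def forward(self, points, feats):
        # points: (N, 3) query locations; feats: (N, feat_dim) local features
        return self.net(torch.cat([points, feats], dim=-1)).squeeze(-1)

model = OccupancyMLP()
occupancy = model(torch.randn(1024, 3), torch.randn(1024, 32))  # shape (1024,)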
de Gennaro, Mauro; Krumhuber, Eva G.; Lucas, Gale
Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood Journal Article
In: Frontiers in Psychology, vol. 10, pp. 3061, 2020, ISSN: 1664-1078.
@article{de_gennaro_effectiveness_2020,
title = {Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood},
author = {Mauro de Gennaro and Eva G. Krumhuber and Gale Lucas},
url = {https://www.frontiersin.org/article/10.3389/fpsyg.2019.03061/full},
doi = {10.3389/fpsyg.2019.03061},
issn = {1664-1078},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Psychology},
volume = {10},
pages = {3061},
abstract = {From past research it is well known that social exclusion has detrimental consequences for mental health. To deal with these adverse effects, socially excluded individuals frequently turn to other humans for emotional support. While chatbots can elicit social and emotional responses on the part of the human interlocutor, their effectiveness in the context of social exclusion has not been investigated. In the present study, we examined whether an empathic chatbot can serve as a buffer against the adverse effects of social ostracism. After experiencing exclusion on social media, participants were randomly assigned to either talk with an empathetic chatbot about it (e.g., “I’m sorry that this happened to you”) or a control condition where their responses were merely acknowledged (e.g., “Thank you for your feedback”). Replicating previous research, results revealed that experiences of social exclusion dampened the mood of participants. Interacting with an empathetic chatbot, however, appeared to have a mitigating impact. In particular, participants in the chatbot intervention condition reported higher mood than those in the control condition. Theoretical, methodological, and practical implications, as well as directions for future research are discussed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2019
Chawla, Kushal; Srinivasan, Balaji Vasan; Chhaya, Niyati
Generating Formality-Tuned Summaries Using Input-Dependent Rewards Proceedings Article
In: Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pp. 833–842, Association for Computational Linguistics, Hong Kong, China, 2019.
@inproceedings{chawla_generating_2019,
title = {Generating Formality-Tuned Summaries Using Input-Dependent Rewards},
author = {Kushal Chawla and Balaji Vasan Srinivasan and Niyati Chhaya},
url = {https://www.aclweb.org/anthology/K19-1078},
doi = {10.18653/v1/K19-1078},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)},
pages = {833–842},
publisher = {Association for Computational Linguistics},
address = {Hong Kong, China},
abstract = {Abstractive text summarization aims at generating human-like summaries by understanding and paraphrasing the given input content. Recent efforts based on sequence-to-sequence networks only allow the generation of a single summary. However, it is often desirable to accommodate the psycho-linguistic preferences of the intended audience while generating the summaries. In this work, we present a reinforcement learning based approach to generate formality-tailored summaries for an input article. Our novel input-dependent reward function aids in training the model with stylistic feedback on sampled and ground-truth summaries together. Once trained, the same model can generate formal and informal summary variants. Our automated and qualitative evaluations show the viability of the proposed framework.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
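The training signal in this last paper is a policy-gradient update against a reward whose composition depends on the input article. A generic self-critical sketch of that kind of update is below; the function signature and the greedy baseline are our assumptions, and the actual reward would mix content overlap with a formality score weighted per input.

import torch

def reinforce_loss(log_probs, sampled_reward, baseline_reward):
    """Self-critical policy-gradient loss for reward-tuned generation.

    log_probs:       (B,) summed token log-probs of sampled summaries.
    sampled_reward:  (B,) reward of each sampled summary.
    baseline_reward: (B,) reward of greedily decoded summaries.
    """
    advantage = (sampled_reward - baseline_reward).detach()
    return -(advantage * log_probs).mean()

# Toy batch of two summaries.
log_probs = torch.tensor([-12.3, -9.8], requires_grad=True)
loss = reinforce_loss(log_probs,
                      torch.tensor([0.7, 0.4]),   # sampled-summary rewards
                      torch.tensor([0.6, 0.5]))   # greedy-baseline rewards
loss.backward()  # gradients flow through log_probs only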