Publications
2020
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Incollection
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
@incollection{brixey_masheli_2020,
title = {Masheli: A Choctaw-English bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41--50},
publisher = {Springer},
address = {Switzerland},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.
Building preparedness in response to active shooter incidents: Results of focus group interviews Journal Article
In: International Journal of Disaster Risk Reduction, vol. 48, pp. 101617, 2020, ISSN: 2212-4209.
@article{zhu_building_2020,
title = {Building preparedness in response to active shooter incidents: Results of focus group interviews},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers},
url = {https://linkinghub.elsevier.com/retrieve/pii/S221242091931427X},
doi = {10.1016/j.ijdrr.2020.101617},
issn = {2212-4209},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Disaster Risk Reduction},
volume = {48},
pages = {101617},
abstract = {Active shooter incidents present an increasing threat to the American society. Many of these incidents occur in building environments, therefore, it is important to consider design and security elements in buildings to decrease the risk of active shooter incidents. This study aims to assess current security countermeasures and identify varying considerations associated with implementing these countermeasures. Fifteen participants, with expertise and experience in a diverse collection of operational and organizational backgrounds, including security, engineering, law enforcement, emergency management and policy making, participated in three focus group interviews. The participants identified a list of countermeasures that have been used for active shooter incidents. Important determinants for the effectiveness of countermeasures include their influence on occupants’ behavior during active shooter incidents, and occupants’ and administrators’ awareness of how to use them effectively. The nature of incidents (e.g., internal vs. external threats), building type (e.g., office buildings vs. school buildings), and occupants (e.g., students of different ages) were also recognized to affect the selection of appropriate countermeasures. The nexus between emergency preparedness and normal operations, and the importance of tradeoffs such as the ones between cost, aesthetics, maintenance needs and the influence on occupants’ daily activities were also discussed. To ensure the effectiveness of countermeasures and improve safety, the participants highlighted the importance of both training and practice, for occupants and administrators (e.g., first responder teams). The interview results suggested that further study of the relationship between security countermeasures and occupants’ and administrators’ responses, as well as efficient training approaches are needed.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315--332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 978-981-15-8394-0 978-981-15-8395-7.
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {978-981-15-8394-0 978-981-15-8395-7},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Davis, Dan M.; Rizvi, Sanad Z.; Carr, Kayla; Swartout, William; Thacker, Raj; Shaw, Kenneth
Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors Journal Article
In: Journal of Research on Technology in Education, pp. 1–23, 2020, ISSN: 1539-1523, 1945-0818.
@article{nye_feasibility_2020,
title = {Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors},
author = {Benjamin D. Nye and Dan M. Davis and Sanad Z. Rizvi and Kayla Carr and William Swartout and Raj Thacker and Kenneth Shaw},
url = {https://www.tandfonline.com/doi/full/10.1080/15391523.2020.1771640},
doi = {10.1080/15391523.2020.1771640},
issn = {1539-1523, 1945-0818},
year = {2020},
date = {2020-07-01},
journal = {Journal of Research on Technology in Education},
pages = {1--23},
abstract = {One-on-one mentoring is an effective method to help novices with career development. However, traditional mentoring scales poorly. To address this problem, MentorPal emulates conversations with a panel of virtual mentors based on recordings of real STEM professionals. Students freely ask questions as they might in a career fair, while machine learning algorithms attempt to provide the best answers. MentorPal has developed strategies for the rapid development of new virtual mentors, where training data will be sparse. In a usability study, 31 high school students self-reported a) increased career knowledge and confidence, b) positive ease-of-use, and that c) mentors were helpful (87%) but often did not cover their preferred career (29%). Results demonstrate the feasibility of scalable virtual mentoring, but efficacy studies are needed to evaluate the impact of virtual mentors, particularly for groups with limited STEM opportunities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Hartholt, Arno; Akinyemi, Dayo; Kubicek, Katarina; Rizzo, Albert (Skip); Kipke, Michele
Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation Incollection
In: HCI International 2020 - Posters, vol. 1225, pp. 304–307, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-50728-2 978-3-030-50729-9.
@incollection{mozgai_development_2020,
title = {Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation},
author = {Sharon Mozgai and Arno Hartholt and Dayo Akinyemi and Katarina Kubicek and Albert (Skip) Rizzo and Michele Kipke},
url = {http://link.springer.com/10.1007/978-3-030-50729-9_43},
doi = {10.1007/978-3-030-50729-9_43},
isbn = {978-3-030-50728-2 978-3-030-50729-9},
year = {2020},
date = {2020-07-01},
booktitle = {HCI International 2020 - Posters},
volume = {1225},
pages = {304--307},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {The overall goal of VRN is to develop a novel technology solution at Children’s Hospital Los Angeles (CHLA) to overcome barriers that prevent the recruitment of diverse patient populations to clinical trials by providing both caregivers and children with an interactive educational experience. This system consists of 1) an intelligent agent called Zippy that users interact with by keyboard or voice input, 2) a series of videos covering topics including Privacy, Consent and Benefits, and 3) a UI that guides users through all available content. Pre- and post-questionnaires assessed willingness to participate in clinical research and found participants either increased or maintained their level of willingness to participate in research studies. Additionally, qualitative analysis of interview data revealed participants rated the overall interaction favorably and believed Zippy to be more fun, less judgmental and less threatening than interacting with a human. Future iterations are in progress based on user feedback.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Czyzewski, Adam; Dalton, Jeffrey; Leuski, Anton
Agent Dialogue: A Platform for Conversational Information Seeking Experimentation Inproceedings
In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2121–2124, ACM, Virtual Event China, 2020, ISBN: 978-1-4503-8016-4.
@inproceedings{czyzewski_agent_2020,
title = {Agent Dialogue: A Platform for Conversational Information Seeking Experimentation},
author = {Adam Czyzewski and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3397271.3401397},
doi = {10.1145/3397271.3401397},
isbn = {978-1-4503-8016-4},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {2121--2124},
publisher = {ACM},
address = {Virtual Event China},
abstract = {Conversational Information Seeking (CIS) is an emerging area of Information Retrieval focused on interactive search systems. As a result there is a need for new benchmark datasets and tools to enable their creation. In this demo we present the Agent Dialogue (AD) platform, an open-source system developed for researchers to perform Wizard-of-Oz CIS experiments. AD is a scalable cloud-native platform developed with Docker and Kubernetes with a flexible and modular micro-service architecture built on production-grade state-of-the-art open-source tools (Kubernetes, gRPC streaming, React, and Firebase). It supports varied front-ends and has the ability to interface with multiple existing agent systems, including Google Assistant and open-source search libraries. It includes support for centralized structured logging as well as offline relevance annotation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
@article{brixey_choco_2020,
title = {ChoCo: a multimodal corpus of the Choctaw language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated into English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Inproceedings
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided EEG Representation Learning for Emotion Recognition},
author = {Soheil Rayatdoost and David Rudrauf and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222--3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Inproceedings
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
@inproceedings{bonial_dialogue-amr_2020,
title = {Dialogue-AMR: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alavi, Seyed Hossein; Leuski, Anton; Traum, David
Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net? Inproceedings
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 735–742, European Language Resources Association, Marseille, France, 2020.
@inproceedings{alavi_which_2020,
title = {Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net?},
author = {Seyed Hossein Alavi and Anton Leuski and David Traum},
url = {https://www.aclweb.org/anthology/2020.lrec-1.92/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {735--742},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We compare two models for corpus-based selection of dialogue responses: one based on cross-language relevance, the other a cross-language LSTM model. Each model is tested on multiple corpora, collected from two different types of dialogue source material. Results show that while the LSTM model performs adequately on a very large corpus (millions of utterances), its performance is dominated by the cross-language relevance model for a more moderate-sized corpus (tens of thousands of utterances).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Stefanov, Kalin; Gratch, Jonathan
Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma Inproceedings
In: Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG), pp. 8, IEEE, Buenos Aires, Argentina, 2020.
@inproceedings{lei_emotion_2020,
title = {Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma},
author = {Su Lei and Kalin Stefanov and Jonathan Gratch},
url = {https://www.computer.org/csdl/proceedings-article/fg/2020/307900a770/1kecIWT5wmA},
doi = {10.1109/FG47880.2020.00123},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)},
pages = {8},
publisher = {IEEE},
address = {Buenos Aires, Argentina},
abstract = {An extensive body of research has examined how specific emotional expressions shape social perceptions and social decisions, yet recent scholarship in emotion research has raised questions about the validity of emotion as a construct. In this article, we contrast the value of measuring emotional expressions with the more general construct of expressivity (in the sense of conveying a thought or emotion through any nonverbal behavior) and develop models that can automatically extract perceived expressivity from videos. Although less extensive, a solid body of research has shown expressivity to be an important element when studying interpersonal perception, particularly in psychiatric contexts. Here we examine the role expressivity plays in predicting social perceptions and decisions in the context of a social dilemma. We show that perceivers use more than facial expressions when making judgments of expressivity and see these expressions as conveying thoughts as well as emotions (although facial expressions and emotional attributions explain most of the variance in these judgments). We next show that expressivity can be predicted with high accuracy using Lasso and random forests. Our analysis shows that features related to motion dynamics are particularly important for modeling these judgments. We also show that learned models of expressivity have value in recognizing important aspects of a social situation. First, we revisit a previously published finding which showed that smile intensity was associated with the unexpectedness of outcomes in social dilemmas; instead, we show that expressivity is a better predictor (and explanation) of this finding. Second, we provide preliminary evidence that expressivity is useful for identifying “moments of interest” in a video sequence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Yanov, Volodymyr; Traum, David
Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers Inproceedings
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 726–734, European Language Resources Association, Marseille, France, 2020.
@inproceedings{georgila_predicting_2020,
title = {Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers},
author = {Kallirroi Georgila and Carla Gordon and Volodymyr Yanov and David Traum},
url = {https://www.aclweb.org/anthology/2020.lrec-1.91/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {726--734},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We collected a corpus of dialogues in a Wizard of Oz (WOz) setting in the Internet of Things (IoT) domain. We asked users participating in these dialogues to rate the system on a number of aspects, namely, intelligence, naturalness, personality, friendliness, their enjoyment, overall quality, and whether they would recommend the system to others. Then we asked dialogue observers, i.e., Amazon Mechanical Turkers (MTurkers), to rate these dialogues on the same aspects. We also generated simulated dialogues between dialogue policies and simulated users and asked MTurkers to rate them again on the same aspects. Using linear regression, we developed dialogue evaluation functions based on features from the simulated dialogues and the MTurkers’ ratings, the WOz dialogues and the MTurkers’ ratings, and the WOz dialogues and the WOz participants’ ratings. We applied all these dialogue evaluation functions to a held-out portion of our WOz dialogues, and we report results on the predictive power of these different types of dialogue evaluation functions. Our results suggest that for three conversational aspects (intelligence, naturalness, overall quality) just training evaluation functions on simulated data could be sufficient.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Inproceedings
In: Proceedings of the 2020 CHI Conference of Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma},
author = {Sharon Mozgai and Arno Hartholt and Andrew Leeds and Albert ‘Skip’ Rizzo},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference of Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first-step in defining a VRET domain transfer methodology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Inproceedings
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bellas, Alexandria; Perrin, Stefawn; Malone, Brandon; Rogers, Kaytlin; Lucas, Gale; Phillips, Elizabeth; Tossell, Chad; de Visser, Ewart
Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams Inproceedings
In: Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS), pp. 160–163, IEEE, Charlottesville, VA, USA, 2020, ISBN: 978-1-72817-145-6.
@inproceedings{bellas_rapport_2020,
title = {Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams},
author = {Alexandria Bellas and Stefawn Perrin and Brandon Malone and Kaytlin Rogers and Gale Lucas and Elizabeth Phillips and Chad Tossell and Ewart de Visser},
url = {https://ieeexplore.ieee.org/document/9106643/},
doi = {10.1109/SIEDS49339.2020.9106643},
isbn = {978-1-72817-145-6},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS)},
pages = {160--163},
publisher = {IEEE},
address = {Charlottesville, VA, USA},
abstract = {Conflicts may arise at any time during military debriefing meetings, especially in high intensity deployed settings. When such conflicts arise, it takes time to get everyone back into a receptive state of mind so that they engage in reflective discussion rather than unproductive arguing. It has been proposed by some that the use of social robots equipped with social abilities such as emotion regulation through rapport building may help to deescalate these situations to facilitate critical operational decisions. However, in military settings, the same AI agent used in the pre-brief of a mission may not be the same one used in the debrief. The purpose of this study was to determine whether a brief rapport-building session with a social robot could create a connection between a human and a robot agent, and whether consistency in the embodiment of the robot agent was necessary for maintaining this connection once formed. We report the results of a pilot study conducted at the United States Air Force Academy which simulated a military mission (i.e., Gravity and Strike). Participants’ connection with the agent, sense of trust, and overall likeability revealed that early rapport building can be beneficial for military missions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Inproceedings
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari Italy, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118--119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Inproceedings
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1--3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach, that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multi-media content via smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in development.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Lerner, Itamar; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans Journal Article
In: Frontiers in Neuroscience, vol. 13, pp. 1416, 2020, ISSN: 1662-453X.
@article{pilly_one-shot_2020,
title = {One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Itamar Lerner and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.frontiersin.org/article/10.3389/fnins.2019.01416/full},
doi = {10.3389/fnins.2019.01416},
issn = {1662-453X},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Neuroscience},
volume = {13},
pages = {1416},
abstract = {Targeted memory reactivation (TMR) during slow-wave oscillations (SWOs) in sleep has been demonstrated with sensory cues to achieve about 5–12% improvement in post-nap memory performance on simple laboratory tasks. But prior work has not yet addressed the one-shot aspect of episodic memory acquisition, or dealt with the presence of interference from ambient environmental cues in real-world settings. Further, TMR with sensory cues may not be scalable to the multitude of experiences over one’s lifetime. We designed a novel non-invasive non-sensory paradigm that tags one-shot experiences of minute-long naturalistic episodes in immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). In particular, we demonstrated that these STAMPs can be reapplied as brief pulses during SWOs in sleep to achieve about 10–20% improvement in the metamemory of targeted episodes compared to the control episodes at 48 hours after initial viewing. We found that STAMPs can not only facilitate but also impair metamemory for the targeted episodes based on an interaction between presleep metamemory and the number of STAMP applications during sleep. Overnight metamemory improvements were mediated by spectral power increases following the offset of STAMPs in the slow-spindle band (8–12 Hz) for left temporal areas in the scalp electroencephalography (EEG) during sleep. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhao, Sicheng; Wang, Shangfei; Soleymani, Mohammad; Joshi, Dhiraj; Ji, Qiang
Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey Journal Article
In: ACM Transactions on Multimedia Computing, Communications, and Applications, vol. 15, no. 3s, pp. 1–32, 2020, ISSN: 1551-6857, 1551-6865.
@article{zhao_affective_2020,
title = {Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey},
author = {Sicheng Zhao and Shangfei Wang and Mohammad Soleymani and Dhiraj Joshi and Qiang Ji},
url = {https://dl.acm.org/doi/10.1145/3363560},
doi = {10.1145/3363560},
issn = {1551-6857, 1551-6865},
year = {2020},
date = {2020-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications, and Applications},
volume = {15},
number = {3s},
pages = {1--32},
abstract = {The wide popularity of digital photography and social networks has generated a rapidly growing volume of multimedia data (i.e., images, music, and videos), resulting in a great demand for managing, retrieving, and understanding these data. Affective computing (AC) of these data can help to understand human behaviors and enable wide applications. In this article, we survey the state-of-the-art AC technologies comprehensively for large-scale heterogeneous multimedia data. We begin this survey by introducing the typical emotion representation models from psychology that are widely employed in AC. We briefly describe the available datasets for evaluating AC algorithms. We then summarize and compare the representative methods on AC of different multimedia types, i.e., images, music, videos, and multimodal data, with the focus on both handcrafted features-based methods and deep learning methods. Finally, we discuss some challenges and future directions for multimedia affective computing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2019
Georgila, Kallirroi; Core, Mark G; Nye, Benjamin D; Karumbaiah, Shamya; Auerbach, Daniel; Ram, Maya
Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training Inproceedings
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 9, IFAAMAS, Montreal, Canada, 2019.
@inproceedings{georgila_using_2019,
title = {Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training},
author = {Kallirroi Georgila and Mark G Core and Benjamin D Nye and Shamya Karumbaiah and Daniel Auerbach and Maya Ram},
url = {http://www.ifaamas.org/Proceedings/aamas2019/pdfs/p737.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {9},
publisher = {IFAAMAS},
address = {Montreal, Canada},
abstract = {Reinforcement Learning (RL) has been applied successfully to Intelligent Tutoring Systems (ITSs) in a limited set of well-defined domains such as mathematics and physics. This work is unique in using a large state space and for applying RL to tutoring interpersonal skills. Interpersonal skills are increasingly recognized as critical to both social and economic development. In particular, this work enhances an ITS designed to teach basic counseling skills that can be applied to challenging issues such as sexual harassment and workplace conflict. An initial data collection was used to train RL policies for the ITS, and an evaluation with human participants compared a hand-crafted ITS which had been used for years with students (control) versus the new ITS guided by RL policies. The RL condition differed from the control condition most notably in the strikingly large quantity of guidance it provided to learners. Both systems were effective and there was an overall significant increase from pre- to post-test scores. Although learning gains did not differ significantly between conditions, learners had a significantly higher self-rating of confidence in the RL condition. Confidence and learning gains were both part of the reward function used to train the RL policies, and it could be the case that there was the most room for improvement in confidence, an important learner emotion. Thus, RL was successful in improving an ITS for teaching interpersonal skills without the need to prune the state space (as previously done).},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Inproceedings
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/~nasihati/publications/HLTCEM_2019.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. In order to develop this system, we are creating a virtual reality simulation, in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Inproceedings
In: Proceedings of IWSDS 2019, pp. 12, Siracusa, Italy, 2019.
@inproceedings{gervits_classication-based_2019,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://www.semanticscholar.org/paper/A-Classification-Based-Approach-to-Automating-Gervits-Leuski/262cf9e3a14e370d46a5e65f7872b32482d9ea69?tab=abstract&citingPapersSort=is-influential&citingPapersLimit=10&citingPapersOffset=0&year%5B0%5D=&year%5B1%5D=&citedPapersSort=is-influential&citedPapersLimit=10&citedPapersOffset=10},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of IWSDS 2019},
pages = {12},
address = {Siracusa, Italy},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Woo, Simon S.; Artstein, Ron; Kaiser, Elsi; Le, Xiao; Mirkovic, Jelena
Using Episodic Memory for User Authentication Journal Article
In: ACM Transactions on Privacy and Security, vol. 22, no. 2, pp. Article 11, 2019.
@article{woo_using_2019,
title = {Using Episodic Memory for User Authentication},
author = {Simon S. Woo and Ron Artstein and Elsi Kaiser and Xiao Le and Jelena Mirkovic},
url = {https://doi.org/10.1145/3308992},
doi = {10.1145/3308992},
year = {2019},
date = {2019-04-01},
journal = {ACM Transactions on Privacy and Security},
volume = {22},
number = {2},
pages = {Article 11},
abstract = {Passwords are widely used for user authentication, but they are often difficult for a user to recall, easily cracked by automated programs, and heavily reused. Security questions are also used for secondary authentication. They are more memorable than passwords, because the question serves as a hint to the user, but they are very easily guessed. We propose a new authentication mechanism, called “life-experience passwords (LEPs).” Sitting somewhere between passwords and security questions, an LEP consists of several facts about a user-chosen life event—such as a trip, a graduation, a wedding, and so on. At LEP creation, the system extracts these facts from the user’s input and transforms them into questions and answers. At authentication, the system prompts the user with questions and matches the answers with the stored ones. We show that question choice and design make LEPs much more secure than security questions and passwords, while the question-answer format promotes low password reuse and high recall. Specifically, we find that: (1) LEPs are 10^9–10^14 × stronger than an ideal, randomized, eight-character password; (2) LEPs are up to 3 × more memorable than passwords and on par with security questions; and (3) LEPs are reused half as often as passwords. While both LEPs and security questions use personal experiences for authentication, LEPs use several questions that are closely tailored to each user. This increases LEP security against guessing attacks. In our evaluation, only 0.7% of LEPs were guessed by casual friends, and 9.5% by family members or close friends—roughly half of the security question guessing rate. On the downside, LEPs take around 5 × longer to input than passwords. So, these qualities make LEPs suitable for multi-factor authentication at high-value servers, such as financial or sensitive work servers, where stronger authentication strength is needed.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Bernardet, Ulysses; Kang, Sin-Hwa; Feng, Andrew; DiPaola, Steve; Shapiro, Ari
Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study Inproceedings
In: 2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE), pp. 1–9, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72813-219-8.
@inproceedings{bernardet_speech_2019,
title = {Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study},
author = {Ulysses Bernardet and Sin-Hwa Kang and Andrew Feng and Steve DiPaola and Ari Shapiro},
url = {https://ieeexplore.ieee.org/document/8714737/},
doi = {10.1109/VHCIE.2019.8714737},
isbn = {978-1-72813-219-8},
year = {2019},
date = {2019-03-01},
booktitle = {2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)},
pages = {1--9},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Human speech production requires the dynamic regulation of air through the vocal system. While virtual character systems commonly are capable of speech output, they rarely take breathing during speaking – speech breathing – into account. We believe that integrating dynamic speech breathing systems in virtual characters can significantly contribute to augmenting their realism. Here, we present a novel control architecture aimed at generating speech breathing in virtual characters. This architecture is informed by behavioral, linguistic and anatomical knowledge of human speech breathing. Based on textual input and controlled by a set of low- and high-level parameters, the system produces dynamic signals in real-time that control the virtual character’s anatomy (thorax, abdomen, head, nostrils, and mouth) and sound production (speech and breathing).},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Leuski, Anton; Benn, Grace; Klassen, Eric; Fast, Edward; Liewer, Matt; Hartholt, Arno; Traum, David
PRIMER: An Emotionally Aware Virtual Agent Inproceedings
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 10, ACM, Los Angeles, CA, 2019.
@inproceedings{gordon_primer_2019,
title = {PRIMER: An Emotionally Aware Virtual Agent},
author = {Carla Gordon and Anton Leuski and Grace Benn and Eric Klassen and Edward Fast and Matt Liewer and Arno Hartholt and David Traum},
url = {https://www.research.ibm.com/haifa/Workshops/user2agent2019/},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {10},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {PRIMER is a proof-of-concept system designed to show the potential of immersive dialogue agents and virtual environments that adapt and respond to both direct verbal input and indirect emotional input. The system has two novel interfaces: (1) for the user, an immersive VR environment and an animated virtual agent both of which adapt and react to the user’s direct input as well as the user’s perceived emotional state, and (2) for an observer, an interface that helps track the perceived emotional state of the user, with visualizations to provide insight into the system’s decision making process. While the basic system architecture can be adapted for many potential real world applications, the initial version of this system was designed to assist clinical social workers in helping children cope with bullying. The virtual agent produces verbal and non-verbal behaviors guided by a plan for the counseling session, based on in-depth discussions with experienced counselors, but is also reactive to both initiatives that the user takes, e.g. asking their own questions, and the user’s perceived emotional state.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gordon, Carla; Sohail, Usman; Merchant, Chirag; Jones, Andrew; Campbell, Julia; Trimmer, Matthew; Bevington, Jeffrey; Engen, COL Christopher; Traum, David
Digital Survivor of Sexual Assault Inproceedings
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 417–425, ACM, Marina del Rey, California, 2019, ISBN: 978-1-4503-6272-6.
@inproceedings{artstein_digital_2019,
title = {Digital Survivor of Sexual Assault},
author = {Ron Artstein and Carla Gordon and Usman Sohail and Chirag Merchant and Andrew Jones and Julia Campbell and Matthew Trimmer and Jeffrey Bevington and COL Christopher Engen and David Traum},
url = {https://doi.org/10.1145/3301275.3302303},
doi = {10.1145/3301275.3302303},
isbn = {978-1-4503-6272-6},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {417--425},
publisher = {ACM},
address = {Marina del Rey, California},
abstract = {The Digital Survivor of Sexual Assault (DS2A) is an interface that allows a user to have a conversational experience with a survivor of sexual assault, using Artificial Intelligence technology and recorded videos. The application uses a statistical classifier to retrieve contextually appropriate pre-recorded video utterances by the survivor, together with dialogue management policies which enable users to conduct simulated conversations with the survivor about the sexual assault, its aftermath, and other pertinent topics. The content in the application has been specifically elicited to support the needs for the training of U.S. Army professionals in the Sexual Harassment/Assault Response and Prevention (SHARP) Program, and the application comes with an instructional support package. The system has been tested with approximately 200 users, and is presently being used in the SHARP Academy's capstone course.},
keywords = {DoD, Graphics, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bönsch, Andrea; Feng, Andrew; Patel, Parth; Shapiro, Ari
Volumetric Video Capture using Unsynchronized, Low-cost Cameras Inproceedings
In: Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, pp. 255–261, SCITEPRESS - Science and Technology Publications, Prague, Czech Republic, 2019, ISBN: 978-989-758-354-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{bonsch_volumetric_2019,
title = {Volumetric Video Capture using Unsynchronized, Low-cost Cameras},
author = {Andrea Bönsch and Andrew Feng and Parth Patel and Ari Shapiro},
url = {http://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0007373202550261},
doi = {10.5220/0007373202550261},
isbn = {978-989-758-354-4},
year = {2019},
date = {2019-02-01},
booktitle = {Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
pages = {255--261},
publisher = {SCITEPRESS - Science and Technology Publications},
address = {Prague, Czech Republic},
abstract = {Volumetric video can be used in virtual and augmented reality applications to show detailed animated performances by human actors. In this paper, we describe a volumetric capture system based on a photogrammetry cage with unsynchronized, low-cost cameras which is able to generate high-quality geometric data for animated avatars. This approach requires, inter alia, a subsequent synchronization of the captured videos.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lerner, Itamar; Ketz, Nicholas A.; Jones, Aaron P.; Bryant, Natalie B.; Robert, Bradley; Skorheim, Steven W.; Hartholt, Arno; Rizzo, Albert S.; Gluck, Mark A.; Clark, Vincent P.; Pilly, Praveen K.
Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences Journal Article
In: Scientific Reports, vol. 9, no. 1, 2019, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{lerner_transcranial_2019,
title = {Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences},
author = {Itamar Lerner and Nicholas A. Ketz and Aaron P. Jones and Natalie B. Bryant and Bradley Robert and Steven W. Skorheim and Arno Hartholt and Albert S. Rizzo and Mark A. Gluck and Vincent P. Clark and Praveen K. Pilly},
url = {http://www.nature.com/articles/s41598-018-36107-7},
doi = {10.1038/s41598-018-36107-7},
issn = {2045-2322},
year = {2019},
date = {2019-02-01},
journal = {Scientific Reports},
volume = {9},
number = {1},
abstract = {Slow-wave sleep (SWS) is known to contribute to memory consolidation, likely through the reactivation of previously encoded waking experiences. Contemporary studies demonstrate that when auditory or olfactory stimulation is administered during memory encoding and then reapplied during SWS, memory consolidation can be enhanced, an effect that is believed to rely on targeted memory reactivation (TMR) induced by the sensory stimulation. Here, we show that transcranial current stimulations (tCS) during sleep can also be used to induce TMR, resulting in the facilitation of high-level cognitive processes. Participants were exposed to repeating sequences in a realistic 3D immersive environment while being stimulated with particular tCS patterns. A subset of these tCS patterns was then reapplied during sleep stages N2 and SWS coupled to slow oscillations in a closed-loop manner. We found that in contrast to our initial hypothesis, performance for the sequences corresponding to the reapplied tCS patterns was no better than for other sequences that received stimulations only during wake or not at all. In contrast, we found that the more stimulations participants received overnight, the more likely they were to detect temporal regularities governing the learned sequences the following morning, with tCS-induced beta power modulations during sleep mediating this effect.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Human Cooperation When Acting Through Autonomous Machines Journal Article
In: Proceedings of the National Academy of Sciences, vol. 116, no. 9, pp. 3482–3487, 2019, ISSN: 0027-8424, 1091-6490.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{de_melo_human_2019,
title = {Human Cooperation When Acting Through Autonomous Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1817656116},
doi = {10.1073/pnas.1817656116},
issn = {0027-8424, 1091-6490},
year = {2019},
date = {2019-02-01},
journal = {Proceedings of the National Academy of Sciences},
volume = {116},
number = {9},
pages = {3482--3487},
abstract = {Recent times have seen an emergence of intelligent machines that act autonomously on our behalf, such as autonomous vehicles. Despite promises of increased efficiency, it is not clear whether this paradigm shift will change how we decide when our self-interest (e.g., comfort) is pitted against the collective interest (e.g., environment). Here we show that acting through machines changes the way people solve these social dilemmas and we present experimental evidence showing that participants program their autonomous vehicles to act more cooperatively than if they were driving themselves. We show that this happens because programming causes selfish short-term rewards to become less salient, leading to considerations of broader societal goals. We also show that the programmed behavior is influenced by past experience. Finally, we report evidence that the effect generalizes beyond the domain of autonomous vehicles. We discuss implications for designing autonomous machines that contribute to a more cooperative society.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chu, Veronica C.; Lucas, Gale M.; Lei, Su; Mozgai, Sharon; Khooshabeh, Peter; Gratch, Jonathan
Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat Journal Article
In: Frontiers in Human Neuroscience, vol. 13, 2019, ISSN: 1662-5161.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, UARC, Virtual Humans
@article{chu_emotion_2019,
title = {Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat},
author = {Veronica C. Chu and Gale M. Lucas and Su Lei and Sharon Mozgai and Peter Khooshabeh and Jonathan Gratch},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2019.00050/full},
doi = {10.3389/fnhum.2019.00050},
issn = {1662-5161},
year = {2019},
date = {2019-02-01},
journal = {Frontiers in Human Neuroscience},
volume = {13},
abstract = {The current study examines cooperation and cardiovascular responses in individuals that were defected on by their opponent in the first round of an iterated Prisoner’s Dilemma. In this scenario, participants were either primed with the emotion regulation strategy of reappraisal or no emotion regulation strategy, and their opponent either expressed an amused smile or a polite smile after the results were presented. We found that cooperation behavior decreased in the no emotion regulation group when the opponent expressed an amused smile compared to a polite smile. In the cardiovascular measures, we found significant differences between the emotion regulation conditions using the biopsychosocial (BPS) model of challenge and threat. However, the cardiovascular measures of participants instructed with the reappraisal strategy were only weakly comparable with a threat state of the BPS model, which involves decreased blood flow and perception of greater task demands than resources to cope with those demands. Conversely, the cardiovascular measures of participants without an emotion regulation strategy were only weakly comparable with a challenge state of the BPS model, which involves increased blood flow and perception of having enough or more resources to cope with task demands.},
keywords = {ARL, DoD, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2018
Khashe, Saba; Lucas, Gale; Becerik-Gerber, Burcin; Gratch, Jonathan
Establishing Social Dialog between Buildings and Their Users Journal Article
In: International Journal of Human–Computer Interaction, pp. 1–12, 2018, ISSN: 1044-7318, 1532-7590.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{khashe_establishing_2018,
title = {Establishing Social Dialog between Buildings and Their Users},
author = {Saba Khashe and Gale Lucas and Burcin Becerik-Gerber and Jonathan Gratch},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2018.1555346},
doi = {10.1080/10447318.2018.1555346},
issn = {1044-7318, 1532-7590},
year = {2018},
date = {2018-12-01},
journal = {International Journal of Human–Computer Interaction},
pages = {1--12},
abstract = {Behavioral intervention strategies have yet to become successful in the development of initiatives to foster pro-environmental behaviors in buildings. In this paper, we explored the potentials of increasing the effectiveness of requests aiming to promote pro-environmental behaviors by engaging users in a social dialog, given the effects of two possible personas that are more related to the buildings (i.e., building vs. building manager). We tested our hypotheses and evaluated our findings in virtual and physical environments and found similar effects in both environments. Our results showed that social dialog involvement persuaded respondents to perform more pro-environmental actions. However, these effects were significant when the requests were delivered by an agent representing the building. In addition, these strategies were not equally effective across all types of people and their effects varied for people with different characteristics. Our findings provide useful design choices for persuasive technologies aiming to promote pro-environmental behaviors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Joshi, Himanshu; Rosenbloom, Paul S; Ustun, Volkan
Exact, Tractable Inference in the Sigma Cognitive Architecture via Sum-Product Networks Journal Article
In: Advances in Cognitive Systems, pp. 31–47, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{joshi_exact_2018,
title = {Exact, Tractable Inference in the Sigma Cognitive Architecture via Sum-Product Networks},
author = {Himanshu Joshi and Paul S Rosenbloom and Volkan Ustun},
url = {http://www.cogsys.org/papers/ACSvol7/papers/paper-7-4.pdf},
year = {2018},
date = {2018-12-01},
journal = {Advances in Cognitive Systems},
pages = {31--47},
abstract = {Sum-product networks (SPNs) are a new kind of deep architecture that support exact, tractable inference over a large class of problems for which traditional graphical models cannot. The Sigma cognitive architecture is based on graphical models, posing a challenge for it to handle problems within this class, such as parsing with probabilistic grammars, a potentially important aspect of language processing. This work proves that an early unidirectional extension to Sigma’s graphical architecture, originally added in service of rule-like behavior but later also shown to support neural networks, can be leveraged to yield exact, tractable computations across this class of problems, and further demonstrates this tractability experimentally for probabilistic parsing. It thus shows that Sigma is able to specify any valid SPN and, despite its grounding in graphical models, retain the desirable inference properties of SPNs when solving them.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Schwartz, David; Lewine, Gabrielle; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy
Addressing Sexist Attitudes on a College Campus through Virtual Role-Play with Digital Doppelgangers Inproceedings
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents - IVA '18, pp. 219–226, ACM Press, Sydney, NSW, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{wang_addressing_2018,
title = {Addressing Sexist Attitudes on a College Campus through Virtual Role-Play with Digital Doppelgangers},
author = {Ning Wang and David Schwartz and Gabrielle Lewine and Ari Shapiro and Andrew Feng and Cindy Zhuang},
url = {http://dl.acm.org/citation.cfm?doid=3267851.3267913},
doi = {10.1145/3267851.3267913},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents - IVA '18},
pages = {219--226},
publisher = {ACM Press},
address = {Sydney, NSW, Australia},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Digital doppelgangers possess great potential to serve as powerful models for behavioral change. An emerging technology, the Rapid Avatar Capture and Simulation (RACAS) system, enables low-cost and high-speed scanning of a human user and creation of a digital doppelganger that is a fully animatable virtual 3D model of the user. We designed a virtual role-playing game, DELTA, that implements a powerful cognitive dissonance-based paradigm for attitudinal and behavioral change, and integrated it with digital doppelgangers to influence a human user’s attitude towards sexism on college campuses. In this paper, we discuss the design and evaluation of the RACAS system and the DELTA game-based environment. Results indicate the potential impact of the DELTA game-based environment in creating an immersive virtual experience for attitudinal change.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Knott, Benjamin A.; Gratch, Jonathan; Cangelosi, Angelo; Caverlee, James
ACM Transactions on Interactive Intelligent Systems (TiiS) Special Issue on Trust and Influence in Intelligent Human-Machine Interaction Journal Article
In: ACM Transactions on Interactive Intelligent Systems, vol. 8, no. 4, pp. 1–3, 2018, ISSN: 21606455.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{knott_acm_2018,
title = {ACM Transactions on Interactive Intelligent Systems (TiiS) Special Issue on Trust and Influence in Intelligent Human-Machine Interaction},
author = {Benjamin A. Knott and Jonathan Gratch and Angelo Cangelosi and James Caverlee},
url = {http://dl.acm.org/citation.cfm?doid=3292532.3281451},
doi = {10.1145/3281451},
issn = {21606455},
year = {2018},
date = {2018-11-01},
journal = {ACM Transactions on Interactive Intelligent Systems},
volume = {8},
number = {4},
pages = {1--3},
abstract = {Recent advances in machine intelligence and robotics have enabled new forms of human-computer interaction characterized by greater adaptability, shared decision-making, and mixed initiative. These advances are leading toward machines that can operate with relative autonomy but are designed to interact or engage with human counterparts in joint human-machine teams. The degree to which people trust machines is critical to the efficacy of these teams. People will cooperate with, and rely upon, intelligent agents they trust. Those they do not trust fall into disuse. As intelligent agents become more self-directed, learn from their experiences, and adapt behavior over time, the relationship between people and machines becomes more complex, and designing system behaviors to engender the proper level of trust becomes more challenging. Moreover, as intelligent systems become common in safety-critical domains, we must understand and assess the influence they might exert on human decision making to avoid unintended consequences, such as over-trust, compliance, or undue influence. Online social environments further complicate human-machine relationships. In the social media ecosystem, intelligent agents (e.g., chatbots) might act as aids or assistants but also as competitors or adversaries. In this context, research challenges include understanding how human-machine relationships evolve in social media and especially how humans develop trust and are susceptible to influence in social networks.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Boberg, Jill; Artstein, Ron; Gratch, Jonathan
Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism Inproceedings
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 125–132, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_towards_2018,
title = {Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jill Boberg and Ron Artstein and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3267910},
doi = {10.1145/3267851.3267910},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {125--132},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {We present the results of a study in which humans negotiate with computerized agents employing varied tactics over a repeated number of economic ultimatum games. We report that certain agents are highly effective against particular classes of humans: several individual difference measures for the human participant are shown to be critical in determining which agents will be successful. Asking for favors works when playing with pro-social people but backfires with more selfish individuals. Further, making poor offers invites punishment from Machiavellian individuals. These factors may be learned once and applied over repeated negotiations, which means user modeling techniques that can detect these differences accurately will be more successful than those that don’t. Our work additionally shows that a significant benefit of cooperation is also present in repeated games—after sufficient interaction. These results have deep significance to agent designers who wish to design agents that are effective in negotiating with a broad swath of real human opponents. Furthermore, it demonstrates the effectiveness of techniques which can reason about negotiation over time.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Georgila, Kallirroi; Choi, Hyungtak; Boberg, Jill; Traum, David
Evaluating Subjective Feedback for Internet of Things Dialogues Inproceedings
In: Proceedings of the 22nd Workshop on the Semantics and Pragmatics of Dialogue, pp. 64–72, Aix-en-Provence, France, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_evaluating_2018,
title = {Evaluating Subjective Feedback for Internet of Things Dialogues},
author = {Carla Gordon and Kallirroi Georgila and Hyungtak Choi and Jill Boberg and David Traum},
url = {https://amubox.univ-amu.fr/s/6YcAg3TpLpfzGEn#pdfviewer},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 22nd Workshop on the Semantics and Pragmatics of Dialogue},
pages = {64--72},
address = {Aix-en-Provence, France},
abstract = {This paper discusses the process of determining which subjective features are seen as ideal in a dialogue system, and linking these features to objectively quantifiable behaviors. A corpus of simulated system-user dialogues in the Internet of Things domain was manually annotated with a set of system communicative and action responses, and crowd-sourced ratings and qualitative feedback of these dialogues were collected. This corpus of subjective feedback was analyzed, revealing that raters described top ranked dialogues as Intelligent, Natural, Pleasant, and as having Personality. Additionally, certain communicative and action responses were statistically more likely to be present in dialogues described as having these features. There was also found to be a lack of agreement among raters as to whether a direct communication style, or a conversational one was preferred, suggesting that future research and development should consider creating models for different communication styles.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S; Sajjadi, Seyed; Nuttall, Jeremy
Controlling Synthetic Characters in Simulations: A Case for Cognitive Architectures and Sigma Inproceedings
In: Proceedings of I/ITSEC 2018, National Training and Simulation Association, Orlando, FL, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ustun_controlling_2018,
title = {Controlling Synthetic Characters in Simulations: A Case for Cognitive Architectures and Sigma},
author = {Volkan Ustun and Paul S Rosenbloom and Seyed Sajjadi and Jeremy Nuttall},
url = {http://bcf.usc.edu/~rosenblo/Pubs/Ustun_IITSEC2018_D.pdf},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of I/ITSEC 2018},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {Simulations, along with other similar applications like virtual worlds and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Cognitive architectures, which are models of the fixed structure underlying intelligent behavior in both natural and artificial systems, provide a conceptually valid common basis, as evidenced by the current efforts towards a standard model of the mind, to generate human-like intelligent behavior for these synthetic characters. Developments in the field of artificial intelligence, mainly in probabilistic graphical models and neural networks, open up new opportunities for cognitive architectures to make the synthetic characters more autonomous and to enrich their behavior. Sigma (Σ) is a cognitive architecture and system that strives to combine what has been learned from four decades of independent work on symbolic cognitive architectures, probabilistic graphical models, and more recently neural models, under its graphical architecture hypothesis. Sigma leverages an extended form of factor graphs towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of cognitive models that possess a Theory-of-Mind and that are perceptual, autonomous, interactive, affective, and adaptive. In this paper, we will introduce Sigma along with its diverse capabilities and then use three distinct proof-of-concept Sigma models to highlight combinations of these capabilities: (1) Distributional reinforcement learning models in a simple OpenAI Gym problem; (2) A pair of adaptive and interactive agent models that demonstrate rule-based, probabilistic, and social reasoning in a physical security scenario instantiated within the SmartBody character animation platform; and (3) A knowledge-free exploration model in which an agent leverages only architectural appraisal variables, namely attention and curiosity, to locate an item while building up a map in a Unity environment.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Schalk, Job Van Der; Lucas, Gale; Gratch, Jonathan
The impact of agent facial mimicry on social behavior in a prisoner’s dilemma Inproceedings
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 275–280, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hoegen_impact_2018,
title = {The impact of agent facial mimicry on social behavior in a prisoner’s dilemma},
author = {Rens Hoegen and Job Van Der Schalk and Gale Lucas and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3267911},
doi = {10.1145/3267851.3267911},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {275--280},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {A long tradition of research suggests a relationship between emotional mimicry and pro-social behavior, but the nature of this relationship is unclear. Does mimicry cause rapport and cooperation, or merely reflect it? Virtual humans can provide unique insights into these social processes by allowing unprecedented levels of experimental control. In a 2 x 2 factorial design, we examined the impact of facial mimicry and counter-mimicry in the iterated prisoner’s dilemma. Participants played with an agent that copied their smiles and frowns or one that showed the opposite pattern – i.e., that frowned when they smiled. As people tend to smile more than frown, we independently manipulated the contingency of expressions to ensure any effects are due to mimicry alone, and not the overall positivity/negativity of the agent: i.e., participants saw either a reflection of their own expressions or saw the expressions shown to a previous participant. Results show that participants smiled significantly more when playing an agent that mimicked them. Results also show a complex association between smiling, feelings of rapport, and cooperation. We discuss the implications of these findings on virtual human systems and theories of cooperation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M; Kramer, Nicole; Peters, Clara; Taesch, Lisa-Sophie; Mell, Johnathan; Gratch, Jonathan
Effects of Perceived Agency and Message Tone in Responding to a Virtual Personal Trainer Inproceedings
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 247–254, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_effects_2018,
title = {Effects of Perceived Agency and Message Tone in Responding to a Virtual Personal Trainer},
author = {Gale M Lucas and Nicole Kramer and Clara Peters and Lisa-Sophie Taesch and Johnathan Mell and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3267855},
doi = {10.1145/3267851.3267855},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {247--254},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {Research has demonstrated promising benefits of applying virtual trainers to promote physical fitness. The current study investigated the value of virtual agents in the context of personal fitness, compared to trainers with greater levels of perceived agency (avatar or live human). We also explored the possibility that the effectiveness of the virtual trainer might depend on the affective tone it uses when trying to motivate users. Accordingly, participants received either positively or negatively valenced motivational messages from a virtual human they believed to be either an agent or an avatar, or they received the messages from a human instructor via Skype. Both self-report and physiological data were collected. Like in-person coaches, the live human trainer who used negatively valenced messages was well-regarded; however, when the agent or avatar used negatively valenced messages, participants responded more poorly than when they used positively valenced ones. Perceived agency also affected rapport: compared to the agent, users felt more rapport with the live human trainer or the avatar. Regardless of trainer type, they also felt more rapport - and said they put in more effort - with trainers that used positively valenced messages than those that used negatively valenced ones. However, in reality, they put in more physical effort (as measured by heart rate) when trainers employed the more negatively valenced affective tone. We discuss implications for human–computer interaction.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan; Baarslag, Tim; Aydoğan, Reyhan; Jonker, Catholijn M
Results of the First Annual Human-Agent League of the Automated Negotiating Agents Competition Inproceedings
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 23–28, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_results_2018,
title = {Results of the First Annual Human-Agent League of the Automated Negotiating Agents Competition},
author = {Johnathan Mell and Jonathan Gratch and Tim Baarslag and Reyhan Aydoğan and Catholijn M Jonker},
url = {https://dl.acm.org/citation.cfm?id=3267907},
doi = {10.1145/3267851.3267907},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {23--28},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {We present the results of the first annual Human-Agent League of ANAC. By introducing a new human-agent negotiating platform to the research community at large, we facilitated new advancements in human-aware agents. This has succeeded in pushing the envelope in agent design, and creating a corpus of useful human-agent interaction data. Our results indicate a variety of agents were submitted, and that their varying strategies had distinct outcomes on many measures of the negotiation. These agents approach the problems endemic to human negotiation, including user modeling, bidding strategy, rapport techniques, and strategic bargaining. Some agents employed advanced tactics in information gathering or emotional displays and gained more points than their opponents, while others were considered more “likeable” by their partners.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jonathan; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Culture, Errors, and Rapport-building Dialogue in Social Agents Inproceedings
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 51–58, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_culture_2018,
title = {Culture, Errors, and Rapport-building Dialogue in Social Agents},
author = {Gale M Lucas and Jill Boberg and David Traum and Ron Artstein and Jonathan Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {https://dl.acm.org/citation.cfm?id=3267887},
doi = {10.1145/3267851.3267887},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {51--58},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {This work explores whether culture impacts the extent to which social dialogue can mitigate (or exacerbate) the loss of trust caused when agents make conversational errors. Our study uses an agent designed to persuade users to agree with its rankings on two tasks. Participants from the U.S. and Japan completed our study. We perform two manipulations: (1) The presence of conversational errors – the agent exhibited errors in the second task or not; (2) The presence of social dialogue – between the two tasks, users either engaged in a social dialogue with the agent or completed a control task. Replicating previous research, conversational errors reduce the agent’s influence. However, we found that culture matters: there was a marginally significant three-way interaction with culture, presence of social dialogue, and presence of errors. The pattern of results suggests that, for American participants, social dialogue backfired if it is followed by errors, presumably because it extends the period of good performance, creating a stronger contrast effect with the subsequent errors. However, for Japanese participants, social dialogue if anything mitigates the detrimental effect of errors; the negative effect of errors is only seen in the absence of a social dialogue. Agent design should therefore take the culture of the intended users into consideration when considering use of social dialogue to bolster agents against conversational errors.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Xijing; Krumhuber, Eva G.; Gratch, Jonathan
The interpersonal effects of emotions in money versus candy games Journal Article
In: Journal of Experimental Social Psychology, vol. 79, pp. 315–327, 2018.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{wang_interpersonal_2018,
title = {The interpersonal effects of emotions in money versus candy games},
author = {Xijing Wang and Eva G. Krumhuber and Jonathan Gratch},
url = {https://www.sciencedirect.com/science/article/pii/S0022103118301914},
year = {2018},
date = {2018-11-01},
journal = {Journal of Experimental Social Psychology},
volume = {79},
pages = {315--327},
abstract = {Emotional expressions significantly influence perceivers’ behavior in economic games and negotiations. The current research examined the interpersonal effects of emotions when such information cannot be used to guide behavior for increasing personal gain and when monetary rewards are made salient. For this, a one-shot Public Goods Game (Studies 1, 2, and 3) and Dictator Game (Studies 4 and 5) were employed, in which the dominant strategy to maximize personal payoff is independent from the counterplayers’ intention signaled through their facial expressions (happiness, sadness, and anger). To elicit a monetary mindset, we used money (vs. candy) as the mode of exchange in the games with (Studies 1 and 2) or without (Studies 3, 4, and 5) additional contextual framing (i.e. Wall Street Game vs. Community Game). Across five studies (N = 1211), participants were found to be more generous towards happy and sad targets compared to angry ones. Such behavioral response based on emotional information was accounted for by the trait impressions (i.e. likability, trustworthiness) formed of the counterplayer. This effect was significantly reduced when money acted as the mode of exchange, thereby making participants focus more on their self-gain. Together, the findings extend previous work by highlighting the social functional role of emotions in human exchange and its moderation by money as a transaction medium.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Marge, Matthew; Bonial, Claire; Lukin, Stephanie M.; Hayes, Cory J.; Foots, Ashley; Artstein, Ron; Henry, Cassidy; Pollard, Kimberly A.; Gordon, Carla; Gervits, Felix; Leuski, Anton; Hill, Susan G.; Voss, Clare R.; Traum, David
Balancing Efficiency and Coverage in Human-Robot Dialogue Collection Inproceedings
In: Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction, arXiv, Arlington, Virginia, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{marge_balancing_2018,
title = {Balancing Efficiency and Coverage in Human-Robot Dialogue Collection},
author = {Matthew Marge and Claire Bonial and Stephanie M. Lukin and Cory J. Hayes and Ashley Foots and Ron Artstein and Cassidy Henry and Kimberly A. Pollard and Carla Gordon and Felix Gervits and Anton Leuski and Susan G. Hill and Clare R. Voss and David Traum},
url = {https://arxiv.org/abs/1810.02017},
year = {2018},
date = {2018-10-01},
booktitle = {Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction},
publisher = {arXiv},
address = {Arlington, Virginia},
abstract = {We describe a multi-phased Wizard-of-Oz approach to collecting human-robot dialogue in a collaborative search and navigation task. The data is being used to train an initial automated robot dialogue system to support collaborative exploration tasks. In the first phase, a wizard freely typed robot utterances to human participants. For the second phase, this data was used to design a GUI that includes buttons for the most common communications, and templates for communications with varying parameters. Comparison of the data gathered in these phases shows that the GUI enabled a faster pace of dialogue while still maintaining high coverage of suitable responses, enabling more efficient targeted data collection, and improvements in natural language understanding using GUI-collected data. As a promising first step towards interactive learning, this work shows that our approach enables the collection of useful training data for navigation-based HRI tasks.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Traum, David; Merla, Arcangelo; Hee, Eugenia; Walker, Zoey; Manini, Barbara; Gallagher, Grady; Petitto, Laura-Ann
Multimodal Dialogue Management for Multiparty Interaction with Infants Inproceedings
In: Proceedings of the 2018 on International Conference on Multimodal Interaction - ICMI '18, pp. 5–13, ACM Press, Boulder, CO, USA, 2018, ISBN: 978-1-4503-5692-3.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nasihati_gilani_multimodal_2018,
title = {Multimodal Dialogue Management for Multiparty Interaction with Infants},
author = {Setareh Nasihati Gilani and David Traum and Arcangelo Merla and Eugenia Hee and Zoey Walker and Barbara Manini and Grady Gallagher and Laura-Ann Petitto},
url = {http://dl.acm.org/citation.cfm?doid=3242969.3243029},
doi = {10.1145/3242969.3243029},
isbn = {978-1-4503-5692-3},
year = {2018},
date = {2018-10-01},
booktitle = {Proceedings of the 2018 on International Conference on Multimodal Interaction - ICMI '18},
pages = {5--13},
publisher = {ACM Press},
address = {Boulder, CO, USA},
abstract = {We present dialogue management routines for a system to engage in multiparty agent-infant interaction. The ultimate purpose of this research is to help infants learn a visual sign language by engaging them in naturalistic and socially contingent conversations during an early-life critical period for language development (ages 6 to 12 months) as initiated by an artificial agent. As a first step, we focus on creating and maintaining agent-infant engagement that elicits appropriate and socially contingent responses from the baby. Our system includes two agents, a physical robot and an animated virtual human. The system's multimodal perception includes an eye-tracker (measures attention) and a thermal infrared imaging camera (measures patterns of emotional arousal). A dialogue policy is presented that selects individual actions and planned multiparty sequences based on perceptual inputs about the baby's internal changing states of emotional engagement. The present version of the system was evaluated in interaction with 8 babies. All babies demonstrated spontaneous and sustained engagement with the agents for several minutes, with patterns of conversationally relevant and socially contingent behaviors. We further performed a detailed case-study analysis with annotation of all agent and baby behaviors. Results show that the baby's behaviors were generally relevant to agent conversations and contained direct evidence for socially contingent responses by the baby to specific linguistic samples produced by the avatar. This work demonstrates the potential for language learning from agents in very young babies and has especially broad implications regarding the use of artificial agents with babies who have minimal language exposure in early life.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Aljanaki, Anna; Soleymani, Mohammad
A data-driven approach to mid-level perceptual musical feature modeling Inproceedings
In: Proceedings of the 19th International Society for Music Information Retrieval Conference, arXiv, Paris, France, 2018.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{aljanaki_data-driven_2018,
title = {A data-driven approach to mid-level perceptual musical feature modeling},
author = {Anna Aljanaki and Mohammad Soleymani},
url = {https://arxiv.org/abs/1806.04903},
year = {2018},
date = {2018-09-01},
booktitle = {Proceedings of the 19th International Society for Music Information Retrieval Conference},
publisher = {arXiv},
address = {Paris, France},
abstract = {Musical features and descriptors could be coarsely divided into three levels of complexity. The bottom level contains the basic building blocks of music, e.g., chords, beats and timbre. The middle level contains concepts that emerge from combining the basic blocks: tonal and rhythmic stability, harmonic and rhythmic complexity, etc. High-level descriptors (genre, mood, expressive style) are usually modeled using the lower level ones. The features belonging to the middle level can both improve automatic recognition of high-level descriptors, and provide new music retrieval possibilities. Mid-level features are subjective and usually lack clear definitions. However, they are very important for human perception of music, and on some of them people can reach high agreement, even though defining them and therefore, designing a hand-crafted feature extractor for them can be difficult. In this paper, we derive the mid-level descriptors from data. We collect and release a dataset (https://osf.io/5aupt/) of 5000 songs annotated by musicians with seven mid-level descriptors, namely, melodiousness, tonal and rhythmic stability, modality, rhythmic complexity, dissonance and articulation. We then compare several approaches to predicting these descriptors from spectrograms using deep-learning. We also demonstrate the usefulness of these mid-level features using music emotion recognition as an application.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rayatdoost, Soheil; Soleymani, Mohammad
Cross-Corpus EEG-Based Emotion Recognition Inproceedings
In: 2018 IEEE 28th International Workshop on Machine Learning for Signal Processing (MLSP), pp. 1–6, IEEE, Aalborg, Denmark, 2018, ISBN: 978-1-5386-5477-4.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{rayatdoost_cross-corpus_2018,
title = {Cross-Corpus EEG-Based Emotion Recognition},
author = {Soheil Rayatdoost and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/8517037/},
doi = {10.1109/MLSP.2018.8517037},
isbn = {978-1-5386-5477-4},
year = {2018},
date = {2018-09-01},
booktitle = {2018 IEEE 28th International Workshop on Machine Learning for Signal Processing (MLSP)},
pages = {1--6},
publisher = {IEEE},
address = {Aalborg, Denmark},
abstract = {Lack of generalization is a common problem in automatic emotion recognition. The present study aims to explore the suitability of the existing EEG features for emotion recognition and investigate the performance of emotion recognition methods across different corpora. We introduce a novel dataset which includes spontaneous emotions and was analyzed in addition to the existing datasets for cross-corpus evaluation. We demonstrate that the performance of the existing methods significantly decreases when evaluated across different corpora. The best results are obtained by a convolutional neural network fed by spectral topography maps from different bands. We provide some evidence that stimuli-related sensory information is learned by machine learning models for emotion recognition using EEG signals.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Larue, Othalia; West, Robert; Rosenbloom, Paul S.; Dancy, Christopher L.; Samsonovich, Alexei V.; Petters, Dean; Juvina, Ion
Emotion in the Common Model of Cognition Journal Article
In: Procedia Computer Science, vol. 145, pp. 740–746, 2018, ISSN: 18770509.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{larue_emotion_2018,
title = {Emotion in the Common Model of Cognition},
author = {Othalia Larue and Robert West and Paul S. Rosenbloom and Christopher L. Dancy and Alexei V. Samsonovich and Dean Petters and Ion Juvina},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1877050918323317},
doi = {10.1016/j.procs.2018.11.045},
issn = {18770509},
year = {2018},
date = {2018-08-01},
journal = {Procedia Computer Science},
volume = {145},
pages = {740--746},
abstract = {Emotions play an important role in human cognition and therefore need to be present in the Common Model of Cognition. In this paper, the emotion working group focuses on functional aspects of emotions and describes what we believe are the points of interactions with the Common Model of Cognition. The present paper should not be viewed as a consensus of the group but rather as a first attempt to extract common and divergent aspects of different models of emotions and how they relate to the Common Model of Cognition.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kralik, Jerald D.; Lee, Jee Hang; Rosenbloom, Paul S.; Jackson, Philip C.; Epstein, Susan L.; Romero, Oscar J.; Sanz, Ricardo; Larue, Othalia; Schmidtke, Hedda R.; Lee, Sang Wan; McGreggor, Keith
Metacognition for a Common Model of Cognition Journal Article
In: Procedia Computer Science, vol. 145, pp. 730–739, 2018, ISSN: 18770509.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{kralik_metacognition_2018,
title = {Metacognition for a Common Model of Cognition},
author = {Jerald D. Kralik and Jee Hang Lee and Paul S. Rosenbloom and Philip C. Jackson and Susan L. Epstein and Oscar J. Romero and Ricardo Sanz and Othalia Larue and Hedda R. Schmidtke and Sang Wan Lee and Keith McGreggor},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1877050918323329},
doi = {10.1016/j.procs.2018.11.046},
issn = {18770509},
year = {2018},
date = {2018-08-01},
journal = {Procedia Computer Science},
volume = {145},
pages = {730--739},
abstract = {This paper provides a starting point for the development of metacognition in a common model of cognition. It identifies significant theoretical work on metacognition from multiple disciplines that the authors believe worthy of consideration. After first defining cognition and metacognition, we outline three general categories of metacognition, provide an initial list of its main components, consider the more difficult problem of consciousness, and present examples of prominent artificial systems that have implemented metacognitive components. Finally, we identify pressing design issues for the future.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Artstein, Ron; Georgila, Kallirroi
DialEdit: Annotations for Spoken Conversational Image Editing Inproceedings
In: Proceedings of the 14th Joint ACL - ISO Workshop on Interoperable Semantic Annotation, Association for Computational Linguistics, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_dialedit_2018,
title = {DialEdit: Annotations for Spoken Conversational Image Editing},
author = {Ramesh Manuvinakurike and Jacqueline Brixey and Trung Bui and Walter Chang and Ron Artstein and Kallirroi Georgila},
url = {https://aclanthology.info/papers/W18-4701/w18-4701},
year = {2018},
date = {2018-08-01},
booktitle = {Proceedings of the 14th Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {Association for Computational Linguistics},
address = {Santa Fe, New Mexico},
abstract = {We present a spoken dialogue corpus and annotation scheme for conversational image editing, where people edit an image interactively through spoken language instructions. Our corpus contains spoken conversations between two human participants: users requesting changes to images and experts performing these modifications in real time. Our annotation scheme consists of 26 dialogue act labels covering instructions, requests, and feedback, together with actions and entities for the content of the edit requests. The corpus supports research and development in areas such as incremental intent recognition, visual reference resolution, image-grounded dialogue modeling, dialogue state tracking, and user modeling.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Welcome to the Real World: How Agent Strategy Increases Human Willingness to Deceive Inproceedings
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1250–1257, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_welcome_2018,
title = {Welcome to the Real World: How Agent Strategy Increases Human Willingness to Deceive},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3237884},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1250--1257},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Humans that negotiate through representatives often instruct those representatives to act in certain ways that align with both the client's goals and his or her social norms. However, which tactics and ethical norms humans endorse vary widely from person to person, and these endorsements may be easy to manipulate. This work presents the results of a study that demonstrates that humans that interact with an artificial agent may change what kinds of tactics and norms they endorse-often dramatically. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. Our work qualifies that initial picture, demonstrating that subsequent experience may change this tendency toward fairness. By exposing human negotiators to tough, automated agents, we are able to shift the participant's willingness to deceive others and utilize "hard-ball" negotiation techniques. In short, what techniques people decide to endorse is dependent upon their context and experience. We examine the effects of interacting with four different types of automated agents, each with a unique strategy, and how this subsequently changes which strategies a human negotiator might later endorse. In the study, which was conducted on an online negotiation platform, four different types of automated agents negotiate with humans over the course of a 10-minute interaction. The agents differ in a 2x2 design according to agent strategy (tough vs. fair) and agent attitude (nice vs. nasty). These results show that in this multi-issue bargaining task, humans that interacted with a tough agent were more willing to endorse deceptive techniques when instructing their own representative. These kinds of techniques were endorsed even if the agent the human encountered did not use deception as part of its strategy. In contrast to some previous work, there was not a significant effect of agent attitude. These results indicate the power of allowing people to program agents that follow their instructions, but also indicate that these social norms and tactic endorsements may be mutable in the presence of real negotiation experience.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Marge, Matthew; Henry, Cassidy; Artstein, Ron; Traum, David; Voss, Clare R.
Consequences and Factors of Stylistic Differences in Human-Robot Dialogue Inproceedings
In: Proceedings of the SIGDIAL 2018 Conference, pp. 110–118, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{lukin_consequences_2018,
title = {Consequences and Factors of Stylistic Differences in Human-Robot Dialogue},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Matthew Marge and Cassidy Henry and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/papers/W/W18/W18-5012/},
doi = {10.18653/v1/W18-5012},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the SIGDIAL 2018 Conference},
pages = {110--118},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {This paper identifies stylistic differences in instruction-giving observed in a corpus of human-robot dialogue. Differences in verbosity and structure (i.e., single-intent vs. multi-intent instructions) arose naturally without restrictions or prior guidance on how users should speak with the robot. Different styles were found to produce different rates of miscommunication, and correlations were found between style differences and individual user variation, trust, and interaction experience with the robot. Understanding potential consequences and factors that influence style can inform design of dialogue systems that are robust to natural variation from human users.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Muessig, Kathryn E.; Knudtson, Kelly A.; Soni, Karina; Larsen, Margo Adams; Traum, David; Dong, Willa; Conserve, Donaldson F.; Leuski, Anton; Artstein, Ron; Hightow-Weidman, Lisa B.
“I Didn't Tell You Sooner Because I Didn't Know How to Handle it Myself”: Developing a Virtual Reality Program to Support HIV-Status Disclosure Decisions Journal Article
In: Digital Culture and Education, vol. 10, pp. 22–48, 2018, ISSN: 1836-8301.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{muessig_i_2018,
title = {“I Didn't Tell You Sooner Because I Didn't Know How to Handle it Myself”: Developing a Virtual Reality Program to Support HIV-Status Disclosure Decisions},
author = {Kathryn E. Muessig and Kelly A. Knudtson and Karina Soni and Margo Adams Larsen and David Traum and Willa Dong and Donaldson F. Conserve and Anton Leuski and Ron Artstein and Lisa B. Hightow-Weidman},
url = {http://www.digitalcultureandeducation.com/s/Muessig-et-al-July-2018.pdf},
issn = {1836-8301},
year = {2018},
date = {2018-07-01},
journal = {Digital Culture and Education},
volume = {10},
pages = {22--48},
abstract = {HIV status disclosure is associated with increased social support and protective behaviors against HIV transmission. Yet disclosure poses significant challenges in the face of persistent societal stigma. Few interventions focus on decision-making, self-efficacy, and communication skills to support disclosing HIV status to an intimate partner. Virtual reality (VR) and artificial intelligence (AI) technologies offer powerful tools to address this gap. Informed by Social Cognitive Theory, we created the Tough Talks VR program for HIV-positive young men who have sex with men (YMSM) to practice status disclosure safely and confidentially. Fifty-eight YMSM (ages 18 – 30, 88% HIV-positive) contributed 132 disclosure dialogues to develop the prototype through focus groups, usability testing, and a technical pilot. The prototype includes three disclosure scenarios (neutral, sympathetic, and negative response) and a database of 125 virtual character utterances. Participants select a VR scenario and realistic virtual character with whom to practice. In a pilot test of the fully automated neutral response scenario, the AI system responded appropriately to 71% of participant utterances. Most pilot study participants agreed Tough Talks was easy to use (9/11) and that they would like to use the system frequently (9/11). Tough Talks demonstrates that VR can be used to practice HIV status disclosure and lessons learned from program development offer insights for the use of AI systems for other areas of health and education.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Manuvinakurike, Ramesh; Bui, Trung; Chang, Walter; Georgila, Kallirroi
Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task Inproceedings
In: Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pp. 284–295, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_conversational_2018,
title = {Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task},
author = {Ramesh Manuvinakurike and Trung Bui and Walter Chang and Kallirroi Georgila},
url = {https://aclanthology.info/papers/W18-5033/w18-5033},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue},
pages = {284--295},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {We present “conversational image editing”, a novel real-world application domain combining dialogue, visual information, and the use of computer vision. We discuss the importance of dialogue incrementality in this task, and build various models for incremental intent identification based on deep learning and traditional classification algorithms. We show how our model based on convolutional neural networks outperforms models based on random forests, long short-term memory networks, and conditional random fields. By training embeddings based on image-related dialogue corpora, we outperform pre-trained out-of-the-box embeddings for intention identification tasks. Our experiments also provide evidence that incremental intent processing may be more efficient for the user and could save time in accomplishing tasks.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Georgila, Kallirroi
Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario Inproceedings
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{karkada_towards_2018,
title = {Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03950},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {We introduce a dataset containing human-authored descriptions of target locations in an “end-of-trip in a taxi ride” scenario. We describe our data collection method and a novel annotation scheme that supports understanding of such descriptions of target locations. Our dataset contains target location descriptions for both synthetic and real-world images as well as visual annotations (ground truth labels, dimensions of vehicles and objects, coordinates of the target location, distance and direction of the target location from vehicles and objects) that can be used in various visual and language tasks. We also perform a pilot experiment on how the corpus could be applied to visual reference resolution in this domain.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Bharadwaj, Sumanth; Georgila, Kallirroi
A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change Inproceedings
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_dialogue_2018,
title = {A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change},
author = {Ramesh Manuvinakurike and Sumanth Bharadwaj and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03948},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {A dialogue annotation scheme for weight management chat using the trans-theoretical model of health behavior change},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Khooshabeh, Peter; Amir, Ori; Gratch, Jonathan
Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing Inproceedings
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 2224–2226, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{de_melo_shaping_2018,
title = {Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing},
author = {Celso M. Melo and Peter Khooshabeh and Ori Amir and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3238129},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {2224--2226},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Emotion expressions can help solve social dilemmas where individual interest is pitted against the collective interest. Building on research that shows that emotions communicate intentions to others, we reinforce that people can infer whether emotionally expressive computer agents intend to cooperate or compete. We further show important distinctions between computer agents that are perceived to be driven by humans (i.e., avatars) vs. by algorithms (i.e., agents). Our results reveal that, when the emotion expression reflects an intention to cooperate, participants will cooperate more with avatars than with agents; however, when the emotion reflects an intention to compete, participants cooperate just as little with avatars as with agents. Finally, we present first evidence that the way the dilemma is described - or framed - can influence people's decision-making. We discuss implications for the design of autonomous agents that foster cooperation with humans, beyond what game theory predicts in social dilemmas.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Monahan, Shannon; Johnson, Emmanuel; Lucas, Gale; Finch, James; Gratch, Jonathan
Autonomous Agent that Provides Automated Feedback Improves Negotiation Skills Incollection
In: Artificial Intelligence in Education, vol. 10948, pp. 225–229, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-93845-5 978-3-319-93846-2.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{monahan_autonomous_2018,
title = {Autonomous Agent that Provides Automated Feedback Improves Negotiation Skills},
author = {Shannon Monahan and Emmanuel Johnson and Gale Lucas and James Finch and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-93846-2_41},
doi = {10.1007/978-3-319-93846-2_41},
isbn = {978-3-319-93845-5 978-3-319-93846-2},
year = {2018},
date = {2018-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {10948},
pages = {225--229},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Research has found that individuals can improve their negotiation abilities by practicing with virtual agents [1, 2]. For these pedagogical agents to become more “intelligent,” the system should be able to give feedback on negotiation performance [3, 4]. In this study, we examined the impact of providing such individualized feedback. Participants first engaged in a negotiation with a virtual agent. After this negotiation, participants were either given automated individualized feedback or not. Feedback was based on negotiation principles [4], which were quantified using a validated approach [5]. Participants then completed a second, parallel negotiation. Our results show that, compared to the control condition, participants who received such feedback after the first negotiation showed a significantly greater improvement in the strength of their first offer, concession curve, and thus their final outcome in the negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Flenner, Arjuna; Fraune, Marlena R.; Hiatt, Laura M.; Kendall, Tony; Laird, John E.; Lebiere, Christian; Rosenbloom, Paul S.; Stein, Frank; Topp, Elin A.; Unhelkar, Vaibhav V.; Zhao, Ying
Reports of the AAAI 2017 Fall Symposium Series Journal Article
In: AI Magazine, vol. 38, no. 2, pp. 81–86, 2018, ISSN: 0738-4602.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{flenner_reports_2018,
title = {Reports of the AAAI 2017 Fall Symposium Series},
author = {Arjuna Flenner and Marlena R. Fraune and Laura M. Hiatt and Tony Kendall and John E. Laird and Christian Lebiere and Paul S. Rosenbloom and Frank Stein and Elin A. Topp and Vaibhav V. Unhelkar and Ying Zhao},
url = {https://www.aaai.org/ojs/index.php/aimagazine/article/view/2813},
doi = {10.1609/aimag.v38i2.2813},
issn = {0738-4602},
year = {2018},
date = {2018-06-01},
journal = {AI Magazine},
volume = {38},
number = {2},
pages = {81--86},
abstract = {The AAAI 2017 Fall Symposium Series was held Thursday through Saturday, November 9-11, at the Westin Arlington Gateway in Arlington, Virginia, adjacent to Washington, DC. The titles of the six symposia were Artificial Intelligence for Human-Robot Interaction; Cognitive Assistance in Government and Public Sector Applications; Deep Models and Artificial Intelligence for Military Applications: Potentials, Theories, Practices, Tools, and Risks; Human-Agent Groups: Studies, Algorithms, and Challenges; Natural Communication for Human-Robot Collaboration; and A Standard Model of the Mind. The highlights of each symposium (except the Natural Communication for Human-Robot Collaboration symposium, whose organizers did not submit a report) are presented in this report.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Schwartz, David; Goldberg, Stephen L.
An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger Inproceedings
In: Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE), pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC, Virtual Humans
@inproceedings{wang_analysis_2018,
title = {An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and David Schwartz and Stephen L. Goldberg},
url = {http://ceur-ws.org/Vol-2141/paper3.pdf},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE)},
volume = {10858},
pages = {256--264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Using a low-cost and high-speed computer graphics and character animation technology, we created digital doppelgangers of students and placed them in a learning-by-explaining task where they interacted with digital doppelgangers of themselves. We investigate the research question of how increasing the similarity of the physical appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual human listener in a learning-by-explaining paradigm. It presents an analysis of how students’ perceptions of the resemblance impact their learning experience and outcomes. The analysis and results offer insight into the promise and limitation of the application of this novel technology to pedagogical agents research.},
keywords = {ARL, DoD, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Karumbaiah, Shamya; Tokel, S. Tugba; Core, Mark G.; Stratou, Giota; Auerbach, Daniel; Georgila, Kallirroi
Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System Inproceedings
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 352–366, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93842-4 978-3-319-93843-1.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{nye_engaging_2018,
title = {Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System},
author = {Benjamin D. Nye and Shamya Karumbaiah and S. Tugba Tokel and Mark G. Core and Giota Stratou and Daniel Auerbach and Kallirroi Georgila},
url = {http://link.springer.com/10.1007/978-3-319-93843-1_26},
doi = {10.1007/978-3-319-93843-1_26},
isbn = {978-3-319-93842-4 978-3-319-93843-1},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10947},
pages = {352--366},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Facial expression trackers output measures for facial action units (AUs) and are increasingly being used in learning technologies. In this paper, we compile patterns of AUs seen in related work as well as use factor analysis to search for categories implicit in our corpus. Although there was some overlap between the factors in our data and previous work, we also identified factors seen in the broader literature but not previously reported in the context of learning environments. In a correlational analysis, we found evidence for relationships between factors and self-reported traits such as academic effort, study habits, and interest in the subject. In addition, we saw differences in average levels of factors between a video-watching activity and a decision-making activity. However, in this analysis, we were not able to isolate any facial expressions having a significant positive or negative relationship with either learning gain or performance once question difficulty and related factors were also considered. Given the overall low levels of facial affect in the corpus, further research will explore different populations and learning tasks to test the possible hypothesis that learners may have been in a pattern of “Over-Flow” in which they were engaged with the system, but not deeply thinking about the content or their errors.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hampton, Andrew J.; Nye, Benjamin D.; Pavlik, Philip I.; Swartout, William R.; Graesser, Arthur C.; Gunderson, Joseph
Mitigating Knowledge Decay from Instruction with Voluntary Use of an Adaptive Learning System Inproceedings
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 119–133, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93845-5 978-3-319-93846-2.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{hampton_mitigating_2018,
title = {Mitigating Knowledge Decay from Instruction with Voluntary Use of an Adaptive Learning System},
author = {Andrew J. Hampton and Benjamin D. Nye and Philip I. Pavlik and William R. Swartout and Arthur C. Graesser and Joseph Gunderson},
url = {http://link.springer.com/10.1007/978-3-319-93846-2_23},
doi = {10.1007/978-3-319-93846-2_23},
isbn = {978-3-319-93845-5 978-3-319-93846-2},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10948},
pages = {119--133},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Knowledge decays across breaks in instruction. Learners lack the metacognition to self-assess their knowledge decay and effectively self-direct review, as well as lacking interactive exercises appropriate to their individual knowledge level. Adaptive learning systems offer the potential to mitigate these issues, by providing open learner models to facilitate learner’s understanding of their knowledge levels and by presenting personalized practice exercises. The current study analyzes differences in knowledge decay between learners randomly assigned to an intervention where they could use an adaptive system during a long gap between courses, compared with a control condition. The experimental condition used the Personal Assistant for Life-Long Learning (PAL3), a tablet-based adaptive learning system integrating multiple intelligent tutoring systems and conventional learning resources. It contained electronics content relevant to the experiment participants, Navy sailors who graduated from apprentice electronics courses (A-School) awaiting assignment to their next training (C-School). The study was conducted over one month, collecting performance data with a counterbalanced pre-, mid-, and post-test. The control condition exhibited the expected decay. The PAL3 condition showed a significant difference from the control, with no significant knowledge decay in their overall knowledge, despite substantial variance in usage for PAL3 (e.g., most of overall use in the first week, with fewer participants engaging as time went on). Interestingly, while overall decay was mitigated in PAL3, this result was primarily through gains in some knowledge offsetting losses in other knowledge. Overall, these results indicate that adaptive study tools can help prevent knowledge decay, even with voluntary usage.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Merchant, Chirag; Schwartz, David; Goldberg, Stephen L.
Learning by Explaining to a Digital Doppelganger Incollection
In: Intelligent Tutoring Systems, vol. 10858, pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, Social Simulation, UARC, Virtual Humans
@incollection{wang_learning_2018,
title = {Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and Chirag Merchant and David Schwartz and Stephen L. Goldberg},
url = {http://link.springer.com/10.1007/978-3-319-91464-0_25},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-05-01},
booktitle = {Intelligent Tutoring Systems},
volume = {10858},
pages = {256--264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. An emerging computer animation technology makes the creation of digital doppelgangers an accessible reality. This allows researchers in pedagogical agents to explore previously unexplorable research questions, such as how increasing the similarity in appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual listener in a learning-by-explaining paradigm. Results offer insight into the promise and limitation of this novel technology.},
keywords = {ARL, DoD, MedVR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Bonial, Claire; Lukin, Stephanie M.; Foots, Ashley; Henry, Cassidy; Marge, Matthew; Pollard, Kimberly A.; Artstein, Ron; Traum, David; Voss, Clare R.
Human-Robot Dialogue and Collaboration in Search and Navigation Inproceedings
In: Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions, AREA 2018, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, Virtual Humans
@inproceedings{bonial_human-robot_2018,
title = {Human-Robot Dialogue and Collaboration in Search and Navigation},
author = {Claire Bonial and Stephanie M. Lukin and Ashley Foots and Cassidy Henry and Matthew Marge and Kimberly A. Pollard and Ron Artstein and David Traum and Clare R. Voss},
url = {http://www.areaworkshop.org/wp-content/uploads/2018/05/4.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the AREA Workshop: Annotation, Recognition, and Evaluation of Actions},
publisher = {AREA 2018},
address = {Miyazaki, Japan},
abstract = {Collaboration with a remotely located robot in tasks such as disaster relief and search and rescue can be facilitated by grounding natural language task instructions into actions executable by the robot in its current physical context. The corpus we describe here provides insight into the translation and interpretation a natural language instruction undergoes starting from verbal human intent, to understanding and processing, and ultimately, to robot execution. We use a ‘Wizard-of-Oz’ methodology to elicit the corpus data in which a participant speaks freely to instruct a robot on what to do and where to move through a remote environment to accomplish collaborative search and navigation tasks. This data offers the potential for exploring and evaluating action models by connecting natural language instructions to execution by a physical robot (controlled by a human ‘wizard’). In this paper, a description of the corpus (soon to be openly available) and examples of actions in the dialogue are provided.},
keywords = {ARL, DoD, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Pincus, Eli; Artstein, Ron
Chahta Anumpa: A Multimodal Corpus of the Choctaw Language Inproceedings
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 3371–3376, ELRA, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{brixey_chahta_2018,
title = {Chahta Anumpa: A Multimodal Corpus of the Choctaw Language},
author = {Jacqueline Brixey and Eli Pincus and Ron Artstein},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/822.html},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {3371--3376},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {This paper presents a general use corpus for the Native American indigenous language Choctaw. The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for the threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Henry, Cassidy; Lukin, Stephanie; Artstein, Ron; Gervits, Felix; Pollard, Kim; Bonial, Claire; Lei, Su; Voss, Clare R.; Marge, Matthew; Hayes, Cory J.; Hill, Susan G.
Dialogue Structure Annotation for Multi-Floor Interaction Inproceedings
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 104–111, ELRA, Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{traum_dialogue_2018,
title = {Dialogue Structure Annotation for Multi-Floor Interaction},
author = {David Traum and Cassidy Henry and Stephanie Lukin and Ron Artstein and Felix Gervits and Kim Pollard and Claire Bonial and Su Lei and Clare R. Voss and Matthew Marge and Cory J. Hayes and Susan G. Hill},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/672.html},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {104--111},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {We present an annotation scheme for meso-level dialogue structure, specifically designed for multi-floor dialogue. The scheme includes a transaction unit that clusters utterances from multiple participants and floors into units according to realization of an initiator’s intent, and relations between individual utterances within the unit. We apply this scheme to annotate a corpus of multi-floor human-robot interaction dialogues. We examine the patterns of structure observed in these dialogues and present inter-annotator statistics and relative frequencies of types of relations and transaction units. Finally, some example applications of these annotations are introduced.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Kim, Doo Soon; Artstein, Ron; Georgila, Kallirroi
Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing Inproceedings
In: Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC), LREC, Miyazaki, Japan, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_edit_2018,
title = {Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing},
author = {Ramesh Manuvinakurike and Jacqueline Brixey and Trung Bui and Walter Chang and Doo Soon Kim and Ron Artstein and Kallirroi Georgila},
url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/481.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC)},
publisher = {LREC},
address = {Miyazaki, Japan},
abstract = {This paper introduces the task of interacting with an image editing program through natural language. We present a corpus of image edit requests which were elicited for real world images, and an annotation framework for understanding such natural language instructions and mapping them to actionable computer commands. Finally, we evaluate crowd-sourced annotation as a means of efficiently creating a sizable corpus at a reasonable cost.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stocco, Andrea; Laird, John; Lebiere, Christian; Rosenbloom, Paul
Empirical Evidence from Neuroimaging Data for a Standard Model of the Mind Inproceedings
In: Proceedings of the 40th Annual Meeting of the Cognitive Science Society, Cognitive Science Society, Madison, WI, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{stocco_empirical_2018,
title = {Empirical Evidence from Neuroimaging Data for a Standard Model of the Mind},
author = {Andrea Stocco and John Laird and Christian Lebiere and Paul Rosenbloom},
url = {https://www.researchgate.net/publication/325106544_Empirical_Evidence_from_Neuroimaging_Data_for_a_Standard_Model_of_the_Mind},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 40th Annual Meeting of the Cognitive Science Society},
publisher = {Cognitive Science Society},
address = {Madison, WI},
abstract = {In a recent paper, Laird, Lebiere, and Rosenbloom (2017) highlight how 40 years of research on cognitive architectures has begun to yield a dramatic convergence of different approaches towards a set of basic assumptions that they called the “Standard Model of the Mind” (SMM), in analogy to the Standard Model of particle physics. The SMM was designed to capture a consensus view of “human-like minds”, whether from AI or cognitive science, which if valid must also be true of the human brain. Here, we provide a preliminary test of this hypothesis based on a re-analysis of fMRI data from four tasks that span a wide range of cognitive functions and cognitive complexity, and are representative of the specific form of intelligence and flexibility that is associated with higher-level human cognition. Using an established method (Dynamic Causal Modeling) to examine functional connectivity between brain regions, the SMM was compared against two alternative models that violate either functional or structural assumptions of the SMM. The results show that, in every dataset, the SMM significantly outperforms the other models, suggesting that the SMM best captures the functional requirements of brain dynamics in fMRI data among these alternatives.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Choi, Hyungtak; Boberg, Jill; Jeon, Heesik; Traum, David
Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues Inproceedings
In: Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS), IWSDS, Singapore, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{georgila_toward_2018,
title = {Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues},
author = {Kallirroi Georgila and Carla Gordon and Hyungtak Choi and Jill Boberg and Heesik Jeon and David Traum},
url = {http://www.colips.org/conferences/iwsds2018/wp/wp-content/uploads/2018/03/IWSDS-2018_paper_18.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS)},
publisher = {IWSDS},
address = {Singapore},
abstract = {We analyze a corpus of system-user dialogues in the Internet of Things domain. Our corpus is automatically, semi-automatically, and manually annotated with a variety of features both on the utterance level and the full dialogue level. The corpus also includes human ratings of dialogue quality collected via crowdsourcing. We calculate correlations between features and human ratings to identify which features are highly associated with human perceptions about dialogue quality in this domain. We also perform linear regression and derive a variety of dialogue quality evaluation functions. These evaluation functions are then applied to a held-out portion of our corpus, and are shown to be highly predictive of human ratings and outperform standard reward-based evaluation functions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiao, Gang; Georgila, Kallirroi
A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue Inproceedings
In: Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31), AAAI, Melbourne, FL, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{xiao_comparison_2018,
title = {A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue},
author = {Gang Xiao and Kallirroi Georgila},
url = {https://aaai.org/ocs/index.php/FLAIRS/FLAIRS18/paper/view/17687},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31)},
publisher = {AAAI},
address = {Melbourne, FL},
abstract = {We use reinforcement learning to learn dialogue policies in a collaborative furniture layout negotiation task. We employ a variety of methodologies (i.e., learning against a simulated user versus co-learning) and algorithms. Our policies achieve the best solution or a good solution to this problem for a variety of settings and initial conditions, including in the presence of noise (e.g., due to speech recognition or natural language understanding errors). Also, our policies perform well even in situations not observed during training. Policies trained against a simulated user perform well while interacting with policies trained through co-learning, and vice versa. Furthermore, policies trained in a two-party setting are successfully applied to a three-party setting, and vice versa.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}