Publications
Search
Papangelis, Alexandros; Georgila, Kallirroi
Reinforcement learning of multi-issue negotiation dialogue policies Proceedings Article
In: Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 154–158, Association for Computational Linguistics, Prague, Czech Republic, 2015.
% SIGDIAL 2015 paper. Fixes: page range uses BibTeX "--" (a literal en-dash
% breaks classic 8-bit BibTeX styles); de-hyphenation artifact "agendabased"
% in the abstract repaired.
@inproceedings{papangelis_reinforcement_2015,
  title     = {Reinforcement learning of multi-issue negotiation dialogue policies},
  author    = {Alexandros Papangelis and Kallirroi Georgila},
  url       = {http://www.aclweb.org/anthology/W15-4621},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  pages     = {154--158},
  publisher = {Association for Computational Linguistics},
  address   = {Prague, Czech Republic},
  abstract  = {We use reinforcement learning (RL) to learn a multi-issue negotiation dialogue policy. For training and evaluation, we build a hand-crafted agenda-based policy, which serves as the negotiation partner of the RL policy. Both the agenda-based and the RL policies are designed to work for a large variety of negotiation settings, and perform well against negotiation partners whose behavior has not been observed before. We evaluate the two models by having them negotiate against each other under various settings. The learned model consistently outperforms the agenda-based model. We also ask human raters to rate negotiation transcripts between the RL policy and the agenda-based policy, regarding the rationality of the two negotiators. The RL policy is perceived as more rational than the agenda-based policy.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Paetzel, Maike; Manuvinakurike, Ramesh; DeVault, David
"So, which one is it?" The effect of alternative incremental architectures in a high-performance game-playing agent Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 77 – 86, Prague, Czech Republic, 2015.
% SIGDIAL 2015 paper. Fixes: straight double quotes in the title replaced with
% LaTeX ``...'' quotes (straight " renders poorly and can confuse styles);
% page range normalized to "--" without spaces; de-hyphenation artifact
% "highperformance" in the abstract repaired.
@inproceedings{paetzel_so_2015,
  title     = {{``So, which one is it?''} The effect of alternative incremental architectures in a high-performance game-playing agent},
  author    = {Maike Paetzel and Ramesh Manuvinakurike and David DeVault},
  url       = {http://ict.usc.edu/pubs/So,%20which%20one%20is%20it%20-%20The%20effect%20of%20alternative%20incremental%20architectures%20in%20a%20high-performance%20game-playing%20agent.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {77--86},
  address   = {Prague, Czech Republic},
  abstract  = {This paper introduces Eve, a high-performance agent that plays a fast-paced image matching game in a spoken dialogue with a human partner. The agent can be optimized and operated in three different modes of incremental speech processing that optionally include incremental speech recognition, language understanding, and dialogue policies. We present our framework for training and evaluating the agent’s dialogue policies. In a user study involving 125 human participants, we evaluate three incremental architectures against each other and also compare their performance to human-human gameplay. Our study reveals that the most fully incremental agent achieves game scores that are comparable to those achieved in human-human gameplay, are higher than those achieved by partially and nonincremental versions, and are accompanied by improved user perceptions of efficiency, understanding of speech, and naturalness of interaction.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pincus, Eli; Georgila, Kallirroi; Traum, David
Which Synthetic Voice Should I Choose for an Evocative Task? Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 105 – 113, Prague, Czech Republic, 2015.
% SIGDIAL 2015 paper. Fixes: booktitle typo "Proceeding" -> "Proceedings"
% (matches the venue naming used elsewhere in this file); page range
% normalized to "--"; {I} brace-protected against sentence-casing styles;
% missing space in abstract ("performtranscription") restored.
@inproceedings{pincus_which_2015,
  title     = {Which Synthetic Voice Should {I} Choose for an Evocative Task?},
  author    = {Eli Pincus and Kallirroi Georgila and David Traum},
  url       = {http://ict.usc.edu/pubs/Which%20Synthetic%20Voice%20Should%20I%20Choose%20for%20an%20Evocative%20Task.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {105--113},
  address   = {Prague, Czech Republic},
  abstract  = {We explore different evaluation methods for 4 different synthetic voices and 1 human voice. We investigate whether intelligibility, naturalness, or likability of a voice is correlated to the voice’s evocative function potential, a measure of the voice’s ability to evoke an intended reaction from the listener. We also investigate the extent to which naturalness and likability ratings vary depending on whether or not exposure to a voice is extended and continuous vs. short-term and sporadic (interleaved with other voices). Finally, we show that an automatic test can replace the standard intelligibility tests for text-to-speech (TTS) systems, which eliminates the need to hire humans to perform transcription tasks saving both time and money.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper. Fix: the named hypothesis in the title is brace-protected
% so sentence-casing bibliography styles do not downcase it.
@inproceedings{gratch_appraisal_2015,
  title     = {The {Appraisal Equivalence Hypothesis}: Verifying the domain-independence of a computational model of emotion dynamics},
  author    = {Jonathan Gratch and Lin Cheng and Stacy Marsella},
  url       = {http://ict.usc.edu/pubs/The%20Appraisal%20Equivalence%20Hypothesis-Verifying%20the%20domain-independence%20of%20a%20computational%20model%20of%20emotion%20dynamics.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Appraisal theory is the most influential theory within affective computing, and serves as the basis for several computational models of emotion. The theory makes strong claims of domain-independence: seemingly different situations, both within and across domains are claimed to produce the identical emotional responses if and only if they are appraised the same way. This article tests this claim, and the predictions of a computational model that embodies it, in two very different interactive games. The results extend prior empirical evidence for appraisal theory to situations where emotions unfold and change over time.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nazari, Zahra; Lucas, Gale; Gratch, Jonathan
Multimodal Approach for Automatic Recognition of Machiavellianism Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper. Fixes: proper noun {Machiavellianism} brace-protected in
% the title; de-hyphenation artifact "personalityrecognition" in the abstract
% repaired.
@inproceedings{nazari_multimodal_2015,
  title     = {Multimodal Approach for Automatic Recognition of {Machiavellianism}},
  author    = {Zahra Nazari and Gale Lucas and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Multimodal%20Approach%20for%20Automatic%20Recognition%20of%20Machiavellianism.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Machiavellianism, by definition, is the tendency to use other people as a tool to achieve one's own goals. Despite the large focus on the Big Five traits of personality, this anti-social trait is relatively unexplored in the computational realm. Automatically recognizing anti-social traits can have important uses across a variety of applications. In this paper, we use negotiation as a setting that provides Machiavellians with the opportunity to reveal their exploitative inclinations. We use textual, visual, acoustic, and behavioral cues to automatically predict High vs. Low Machiavellian personalities. These learned models have good accuracy when compared with other personality recognition methods, and we provide evidence that the automatically-learned models are consistent with existing literature on this anti-social trait, giving evidence that these results can generalize to other domains.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stratou, Giota; Hoegen, Rens; Lucas, Gale; Gratch, Jonathan
Emotional Signaling in a Social Dilemma: an Automatic Analysis Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper. Fixes: de-hyphenation artifact "face-toface" in the
% abstract repaired; missing final period added.
@inproceedings{stratou_emotional_2015,
  title     = {Emotional Signaling in a Social Dilemma: an Automatic Analysis},
  author    = {Giota Stratou and Rens Hoegen and Gale Lucas and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Emotional%20Signaling%20in%20a%20Social%20Dilemma-an%20Automatic%20Analysis.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Emotional signaling plays an important role in negotiations and other social decision-making tasks as it can signal intention and shape joint decisions. Specifically it has been shown to influence cooperation or competition. This has been shown in previous studies for scripted interactions that control emotion signaling and rely on manual coding of affect. In this work we examine face-to-face interactions in an iterative social dilemma task (prisoner’s dilemma) via an automatic framework for facial expression analysis. We explore if automatic analysis of emotion can give insight into the social function of emotion in face-to-face interactions. Our analysis suggests that positive and negative displays of emotion are associated with more prosocial and proself game acts respectively. Moreover signaling cooperative intentions to the opponent via positivity can leave participants more open to exploitation, whereas signaling a more tough stance via negativity seems to discourage exploitation. However, the benefit of negative affect is short-term and both players do worse over time if they show negative emotions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Gratch, Jonathan; Rosenfeld, Avi
Saying YES! The Cross-cultural Complexities of Favors and Trust in Human-Agent Negotiation Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper. Fix: all-caps "YES!" brace-protected in the title so
% sentence-casing bibliography styles do not downcase it.
@inproceedings{mell_saying_2015,
  title     = {Saying {YES!} The Cross-cultural Complexities of Favors and Trust in Human-Agent Negotiation},
  author    = {Johnathan Mell and Gale Lucas and Jonathan Gratch and Avi Rosenfeld},
  url       = {http://ict.usc.edu/pubs/Saying%20YES!%20The%20Cross-cultural%20Complexities%20of%20Favors%20and%20Trust%20in%20Human-Agent%20Negotiation.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Negotiation between virtual agents and humans is a complex field that requires designers of systems to be aware not only of the efficient solutions to a given game, but also the mechanisms by which humans create value over multiple negotiations. One way of considering the agent’s impact beyond a single negotiation session is by considering the use of external “ledgers” across multiple sessions. We present results that describe the effects of favor exchange on negotiation outcomes, fairness, and trust for two distinct cross-cultural populations, and illustrate the ramifications of their similarities and differences on virtual agent design.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wortwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
Automatic Assessment and Analysis of Public Speaking Anxiety: A Virtual Audience Case Study Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper (teachPress export; all field values unchanged).
@inproceedings{wortwein_automatic_2015,
  author    = {Torsten Wortwein and Louis-Philippe Morency and Stefan Scherer},
  title     = {Automatic Assessment and Analysis of Public Speaking Anxiety: A Virtual Audience Case Study},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  year      = {2015},
  date      = {2015-09-01},
  url       = {http://ict.usc.edu/pubs/Automatic%20Assessment%20and%20Analysis%20of%20Public%20Speaking%20Anxiety%20-%20A%20Virtual%20Audience%20Case%20Study.pdf},
  abstract  = {Public speaking has become an integral part of many professions and is central to career building opportunities. Yet, public speaking anxiety is often referred to as the most common fear in everyday life and can hinder one’s ability to speak in public severely. While virtual and real audiences have been successfully utilized to treat public speaking anxiety in the past, little work has been done on identifying behavioral characteristics of speakers suffering from anxiety. In this work, we focus on the characterization of behavioral indicators and the automatic assessment of public speaking anxiety. We identify several indicators for public speaking anxiety, among them are less eye contact with the audience, reduced variability in the voice, and more pauses. We automatically assess the public speaking anxiety as reported by the speakers through a self-assessment questionnaire using a speaker independent paradigm. Our approach using ensemble trees achieves a high correlation between ground truth and our estimation (r=0.825). Complementary to automatic measures of anxiety, we are also interested in speakers’ perceptual differences when interacting with a virtual audience based on their level of anxiety in order to improve and further the development of virtual audiences for the training of public speaking and the reduction of anxiety.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
de Melo, Celso M.; Gratch, Jonathan
People Show Envy, Not Guilt, when Making Decisions with Machines Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper. Fixes: author surname particle "de" restored ("de Melo"
% matches the citation key de_melo_people_2015 — NOTE(review): confirm against
% the PDF); de-hyphenation artifact "wellknown" in the abstract repaired.
@inproceedings{de_melo_people_2015,
  title     = {People Show Envy, Not Guilt, when Making Decisions with Machines},
  author    = {Celso M. de Melo and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/People%20Show%20Envy,%20Not%20Guilt,%20when%20Making%20Decisions%20with%20Machines.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Research shows that people consistently reach more efficient solutions than those predicted by standard economic models, which assume people are selfish. Artificial intelligence, in turn, seeks to create machines that can achieve these levels of efficiency in human-machine interaction. However, as reinforced in this paper, people’s decisions are systematically less efficient – i.e., less fair and favorable – with machines than with humans. To understand the cause of this bias, we resort to a well-known experimental economics model: Fehr and Schmidt’s inequity aversion model. This model accounts for people’s aversion to disadvantageous outcome inequality (envy) and aversion to advantageous outcome inequality (guilt). We present an experiment where participants engaged in the ultimatum and dictator games with human or machine counterparts. By fitting this data to Fehr and Schmidt’s model, we show that people acted as if they were just as envious of humans as of machines; but, in contrast, people showed less guilt when making unfavorable decisions to machines. This result, thus, provides critical insight into this bias people show, in economic settings, in favor of humans. We discuss implications for the design of machines that engage in social decision making with humans.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Eskenazi, Maxine; Black, Alan W.; Lee, Sungjin; Traum, David
THE REAL CHALLENGE 2014: PROGRESS AND PROSPECTS Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 209 – 216, 2015.
% SIGDIAL 2015 paper. Fixes: ALL-CAPS auto-export title converted to proper
% case with the {REAL} acronym brace-protected (casing grounded by the
% abstract's own "The REAL Challenge"); booktitle typo "Proceeding" ->
% "Proceedings"; page range normalized to "--".
@inproceedings{eskenazi_real_2015,
  title     = {The {REAL} Challenge 2014: Progress and Prospects},
  author    = {Maxine Eskenazi and Alan W. Black and Sungjin Lee and David Traum},
  url       = {http://ict.usc.edu/pubs/THE%20REAL%20CHALLENGE%202014-PROGRESS%20AND%20PROSPECTS.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {209--216},
  abstract  = {The REAL Challenge took place for the first time in 2014, with a long term goal of creating streams of real data that the research community can use, by fostering the creation of systems that are capable of attracting real users. A novel approach is to have high school and undergraduate students devise the types of applications that would attract many real users and that need spoken interaction. The projects are presented to researchers from the spoken dialog research community and the researchers and students work together to refine and develop the ideas. Eleven projects were presented at the first workshop. Many of them have found mentors to help in the next stages of the projects. The students have also brought out issues in the use of speech for real applications. Those issues involve privacy and significant personalization of the applications. While long-term impact of the challenge remains to be seen, the challenge has already been a success at its immediate aims of bringing new ideas and new researchers into the community, and serves as a model for related outreach efforts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Scherer, Stefan; Morency, Louis-Philippe
A Multi-label Convolutional Neural Network Approach to Cross-Domain Action Unit Detection Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper (teachPress export; all field values unchanged).
@inproceedings{ghosh_multi-label_2015,
  author    = {Sayan Ghosh and Eugene Laksana and Stefan Scherer and Louis-Philippe Morency},
  title     = {A Multi-label Convolutional Neural Network Approach to Cross-Domain Action Unit Detection},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  year      = {2015},
  date      = {2015-09-01},
  url       = {http://ict.usc.edu/pubs/A%20Multi-label%20Convolutional%20Neural%20Network%20Approach%20to%20Cross-Domain%20Action%20Unit%20Detection.pdf},
  abstract  = {Action Unit (AU) detection from facial images is an important classification task in affective computing. However most existing approaches use carefully engineered feature extractors along with off-the-shelf classifiers. There has also been less focus on how well classifiers generalize when tested on different datasets. In our paper, we propose a multi-label convolutional neural network approach to learn a shared representation between multiple AUs directly from the input image. Experiments on three AU datasets- CK+, DISFA and BP4D indicate that our approach obtains competitive results on all datasets. Cross-dataset experiments also indicate that the network generalizes well to other datasets, even when under different training and testing conditions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hiraoka, Takuya; Georgila, Kallirroi; Nouri, Elnaz; Traum, David; Nakamura, Satoshi
Reinforcement Learning in Multi-Party Trading Dialog Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 32 – 41, Prague, Czech Republic, 2015.
% SIGDIAL 2015 paper. Fixes: booktitle typo "Proceeding" -> "Proceedings";
% page range normalized to "--"; missing space in abstract ("agents.We")
% restored.
@inproceedings{hiraoka_reinforcement_2015,
  title     = {Reinforcement Learning in Multi-Party Trading Dialog},
  author    = {Takuya Hiraoka and Kallirroi Georgila and Elnaz Nouri and David Traum and Satoshi Nakamura},
  url       = {http://ict.usc.edu/pubs/Reinforcement%20Learning%20in%20Multi-Party%20Trading%20Dialog.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {32--41},
  address   = {Prague, Czech Republic},
  abstract  = {In this paper, we apply reinforcement learning (RL) to a multi-party trading scenario where the dialog system (learner) trades with one, two, or three other agents. We experiment with different RL algorithms and reward functions. The negotiation strategy of the learner is learned through simulated dialog with trader simulators. In our experiments, we evaluate how the performance of the learner varies depending on the RL algorithm used and the number of traders. Our results show that (1) even in simple multi-party trading dialog tasks, learning an effective negotiation policy is a very hard problem; and (2) the use of neural fitted Q iteration combined with an incremental reward function produces negotiation policies as effective or even better than the policies of two strong hand-crafted baselines.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chollet, Mathieu; Wortwein, Torsten; Morency, Louis-Philippe; Shapiro, Ari; Scherer, Stefan
Exploring Feedback Strategies to Improve Public Speaking: An Interactive Virtual Audience Framework Proceedings Article
In: Proceedings of UbiComp 2015, ACM, Osaka, Japan, 2015.
% UbiComp 2015 paper (teachPress export; all field values unchanged).
@inproceedings{chollet_exploring_2015,
  author    = {Mathieu Chollet and Torsten Wortwein and Louis-Philippe Morency and Ari Shapiro and Stefan Scherer},
  title     = {Exploring Feedback Strategies to Improve Public Speaking: An Interactive Virtual Audience Framework},
  booktitle = {Proceedings of UbiComp 2015},
  publisher = {ACM},
  address   = {Osaka, Japan},
  year      = {2015},
  date      = {2015-09-01},
  url       = {http://ict.usc.edu/pubs/Exploring%20Feedback%20Strategies%20to%20Improve%20Public%20Speaking%20-%20An%20Interactive%20Virtual%20Audience%20Framework.pdf},
  abstract  = {Good public speaking skills convey strong and effective communication, which is critical in many professions and used in everyday life. The ability to speak publicly requires a lot of training and practice. Recent technological developments enable new approaches for public speaking training that allow users to practice in a safe and engaging environment. We explore feedback strategies for public speaking training that are based on an interactive virtual audience paradigm. We investigate three study conditions: (1) a non-interactive virtual audience (control condition), (2) direct visual feedback, and (3) nonverbal feedback from an interactive virtual audience. We perform a threefold evaluation based on self-assessment questionnaires, expert assessments, and two objectively annotated measures of eye-contact and avoidance of pause fillers. Our experiments show that the interactive virtual audience brings together the best of both worlds: increased engagement and challenge as well as improved public speaking skills as judged by experts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale; Gratch, Jonathan; Scherer, Stefan; Boberg, Jill; Stratou, Giota
Towards an Affective Interface for Assessment of Psychological Distress Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper (teachPress export; all field values unchanged).
@inproceedings{lucas_towards_2015,
  author    = {Gale Lucas and Jonathan Gratch and Stefan Scherer and Jill Boberg and Giota Stratou},
  title     = {Towards an Affective Interface for Assessment of Psychological Distress},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  year      = {2015},
  date      = {2015-09-01},
  url       = {http://ict.usc.edu/pubs/Towards%20an%20Affective%20Interface%20for%20Assessment%20of%20Psychological%20Distress.pdf},
  abstract  = {Even with the rise in use of TeleMedicine for health care and mental health, research suggests that clinicians may have difficulty reading nonverbal cues in computer-mediated situations. However, the recent progress in tracking affective markers (i.e., displays of emotional expressions on face and in voice) has opened the door to new clinical applications that might help health care providers better read nonverbal behaviors when employing TeleMedicine. For example, an interface that automatically quantified affective markers could assist clinicians in their assessment of and treatment for psychological distress (i.e., symptoms of depression and PTSD). To move towards this prospect, we will show that clinicians’ judgments of these nonverbal affective markers (e.g., smile, frown, eye contact, tense voice) could be informed by such technology. The results of our evaluation suggest that clinicians’ ratings of nonverbal affective markers are less predictive of psychological distress than automatically quantified affective markers. Because such quantifications are more strongly associated with psychological distress than clinician ratings of these same nonverbal behaviors, an affective interface providing quantifications of nonverbal affective markers could potentially improve assessment of psychological distress.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli; Nichols, Jeffrey
GOAALLL!: Using Sentiment in the World Cup to Explore Theories of Emotion Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
% ACII 2015 paper. Fixes: {GOAALLL!} and {World Cup} brace-protected in the
% title against sentence-casing styles; de-hyphenation artifact "overtime"
% (for "over time") in the abstract repaired.
@inproceedings{gratch_goaalll_2015,
  title     = {{GOAALLL!}: Using Sentiment in the {World Cup} to Explore Theories of Emotion},
  author    = {Jonathan Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler and Jeffrey Nichols},
  url       = {http://ict.usc.edu/pubs/GOAALLL!%20Using%20Sentiment%20in%20the%20World%20Cup%20to%20Explore%20Theories%20of%20Emotion.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Sporting events evoke strong emotions amongst fans and thus act as natural laboratories to explore emotions and how they unfold in the wild. Computational tools, such as sentiment analysis, provide new ways to examine such dynamic emotional processes. In this article we use sentiment analysis to examine tweets posted during 2014 World Cup. Such analysis gives insight into how people respond to highly emotional events, and how these emotions are shaped by contextual factors, such as prior expectations, and how these emotions change as events unfold over time. Here we report on some preliminary analysis of a World Cup twitter corpus using sentiment analysis techniques. We show these tools can give new insights into existing theories of what makes a sporting match exciting. This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion. We also discuss some challenges that such data present for existing sentiment analysis techniques and discuss future analysis.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xu, Jie; Montague, Enid; Gratch, Jonathan; Hancock, Peter; Jeon, Myounghoon; Pfaff, Mark S.
Advances of Research in Affective Processes in Communication and Collaboration Journal Article
In: Proceedings of the Human Factors and Ergonomics Society Annual Meeting, vol. 59, no. 1, pp. 299–302, 2015, ISSN: 1541-9312.
% HFES 2015 panel (journal-format proceedings). Fix: page range uses BibTeX
% "--" instead of a literal en-dash.
@article{xu_advances_2015,
  title     = {Advances of Research in Affective Processes in Communication and Collaboration},
  author    = {Jie Xu and Enid Montague and Jonathan Gratch and Peter Hancock and Myounghoon Jeon and Mark S. Pfaff},
  url       = {http://pro.sagepub.com/lookup/doi/10.1177/1541931215591061},
  doi       = {10.1177/1541931215591061},
  issn      = {1541-9312},
  year      = {2015},
  date      = {2015-09-01},
  journal   = {Proceedings of the Human Factors and Ergonomics Society Annual Meeting},
  volume    = {59},
  number    = {1},
  pages     = {299--302},
  abstract  = {Affective processes have been an important research area for human factors and ergonomics. Although there is an obvious connection between affect and communication and collaboration, little research has been conducted in the human factors community until recently. In this panel, the panelists will discuss recent advances in affective research in communication and collaboration systems. Theoretical perspectives in human computer interaction, human agent interaction, and teamwork that take affective process into account will be discussed. Methodological issues will also be addressed, such as the measurements of affect, research design, and data analysis methods. Finally the applications of the theories and methods in different systems, such as human robot interaction, healthcare, and multi-tasking teams, will be discussed.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Stratou, Giota; Morency, Louis-Philippe; DeVault, David; Hartholt, Arno; Fast, Edward; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert
A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews Proceedings Article
In: Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on, pp. 787–789, IEEE, Xi'an, China, 2015.
% ACII 2015 demo paper. Fixes: author "Marcella Stacy" corrected to
% "Stacy Marsella" (swapped/misspelled in the export; the same person appears
% as "Stacy Marsella" in the other entries of this file); IEEE-export
% reversed booktitle normalized to natural word order; camel-case system name
% {SimSensei} brace-protected in the title.
@inproceedings{stratou_demonstration_2015,
  title     = {A Demonstration of the Perception System in {SimSensei}, a Virtual Human Application for Healthcare Interviews},
  author    = {Giota Stratou and Louis-Philippe Morency and David DeVault and Arno Hartholt and Edward Fast and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert Rizzo},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7344661},
  doi       = {10.1109/ACII.2015.7344661},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {2015 International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {787--789},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. With this demo, we focus our attention on the perception part of the system, a multimodal framework which captures and analyzes user state behavior for both behavioral understanding and interactional purposes. We will demonstrate real-time user state sensing as a part of the SimSensei architecture and discuss how this technology enabled automatic analysis of behaviors related to psychological distress.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
% IVA 2015 paper (teachPress export; all field values unchanged).
@inproceedings{feng_platform_2015,
  author    = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
  title     = {A Platform for Building Mobile Virtual Humans},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {310--319},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  year      = {2015},
  date      = {2015-08-01},
  doi       = {10.1007/978-3-319-21996-7},
  url       = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
  abstract  = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
When the going gets tough: Grit predicts costly perseverance Journal Article
In: Journal of Research in Personality, vol. 59, pp. 15–22, 2015, ISSN: 00926566.
% Journal of Research in Personality article. Fixes: ISSN hyphenated per the
% standard 4-4 format (00926566 -> 0092-6566); page range uses BibTeX "--"
% instead of a literal en-dash.
@article{lucas_when_2015,
  title     = {When the going gets tough: Grit predicts costly perseverance},
  author    = {Gale M. Lucas and Jonathan Gratch and Lin Cheng and Stacy Marsella},
  url       = {http://ict.usc.edu/pubs/When%20the%20going%20gets%20tough-Grit%20predicts%20costly%20perseverance.pdf},
  doi       = {10.1016/j.jrp.2015.08.004},
  issn      = {0092-6566},
  year      = {2015},
  date      = {2015-08-01},
  journal   = {Journal of Research in Personality},
  volume    = {59},
  pages     = {15--22},
  abstract  = {In this research, we investigate how grittier individuals might incur some costs by persisting when they could move on. Grittier participants were found to be less willing to give up when failing even though they were likely to incur a cost for their persistence. First, grittier participants are more willing to risk failing to complete a task by persisting on individual items. Second, when they are losing, they expend more effort and persist longer in a game rather than quit. Gritty participants have more positive emotions and expectations toward the task, which mediates the relationship between grit and staying to persist when they are losing. Results show gritty individuals are more willing to risk suffering monetary loss to persist.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Manuvinakurike, Ramesh; Paetzel, Maike; DeVault, David
Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection Proceedings Article
In: Proceedings of SEMDIAL 2015 goDIAL, pp. 113 – 121, Gothenburg, Sweden, 2015.
@inproceedings{manuvinakurike_reducing_2015,
title = {Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection},
author = {Ramesh Manuvinakurike and Maike Paetzel and David DeVault},
url = {http://ict.usc.edu/pubs/Reducing%20the%20Cost%20of%20Dialogue%20System%20Training%20and%20Evaluation%20with%20Online,%20Crowd-Sourced%20Dialogue%20Data%20Collection.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of SEMDIAL 2015 goDIAL},
pages = {113–121},
address = {Gothenburg, Sweden},
abstract = {This paper presents and analyzes an approach to crowd-sourced spoken dialogue data collection. Our approach enables low cost collection of browser-based spoken dialogue interactions between two remote human participants (human-human condition) as well as one remote human participant and an automated dialogue system (human-agent condition). We present a case study in which 200 remote participants were recruited to participate in a fast-paced image matching game, and which included both human-human and human-agent conditions. We discuss several technical challenges encountered in achieving this crowd-sourced data collection, and analyze the costs in time and money of carrying out the study. Our results suggest the potential of crowdsourced spoken dialogue data to lower costs and facilitate a range of research in dialogue modeling, dialogue system design, and system evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2001
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{gratch_adaptive_2001,
title = {Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after},
author = {Jonathan Gratch and Jay Douglas},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
doi = {10.1007/3-540-45420-9_12},
isbn = {3-540-42611-6},
year = {2001},
date = {2001-10-01},
booktitle = {Proceedings of International Conference on Virtual Storytelling},
pages = {100–112},
address = {Avignon, France},
series = {LNCS},
abstract = {Interacting Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN provide a strong basis for narrative coherence in the context of interactivity, HSP offer additional flexibility and the generation of stories and the mechanisms for generating comic situations.},
internal-note = {NOTE(review): abstract text describes HTN vs. HSP planning formalisms for Interactive Storytelling and does not appear to match this title; douglas_adaptive_2001 carries the same title and PDF URL with a matching abstract — verify against the PDF},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {DTIC, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
  title     = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
  author    = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
  url       = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  abstract  = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of realworld scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
  keywords  = {DTIC, Social Simulation, Virtual Humans, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
title = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
author = {Jay Douglas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
internal-note = {NOTE(review): same title and PDF URL as gratch_adaptive_2001 but different venue and author order — possible duplicate entry; verify which venue is correct},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
  title     = {Modeling Emotions in the Mission Rehearsal Exercise},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
  year      = {2001},
  date      = {2001-05-01},
  booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
  pages     = {457–466},
  address   = {Orlando, FL},
  abstract  = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
  keywords  = {DTIC, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Van Ess-Dykema, Carol; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
author = {Mari Olsen and David Traum and Van Ess-Dykema, Carol and Amy Weinberg},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization with the best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{damiano_anticipatory_2001,
  title     = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
  author    = {Rossana Damiano and David Traum},
  url       = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
  keywords  = {DTIC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans
@inproceedings{gratch_tears_2001,
title = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {278–285},
address = {Montreal, Canada},
abstract = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al.'s IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision- making skills in highly evocative situations.},
keywords = {DTIC, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Proceedings Article
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2001,
  title     = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
  address   = {Edinburgh, Scotland},
  abstract  = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
  keywords  = {DTIC, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2000
Hill, Randall W.; Gratch, Jonathan; Rosenbloom, Paul
Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, DTIC, Social Simulation, Virtual Humans
@inproceedings{hill_flexible_2000,
  title     = {Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces},
  author    = {Randall W. Hill and Jonathan Gratch and Paul Rosenbloom},
  url       = {http://ict.usc.edu/pubs/Flexible%20Group%20Behavior-%20Virtual%20Commanders%20for%20Synthetic%20Battlespaces.pdf},
  year      = {2000},
  date      = {2000-06-01},
  booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
  address   = {Barcelona, Spain},
  abstract  = {This paper describes a project to develop autonomous commander agents for synthetic battlespaces. The commander agents plan missions, monitor their execution, and replan when necessary. To reason about the social aspects of group behavior, the commanders take various social stances that enable them to collaborate with friends, exercise or defer to authority, and thwart their foes. The purpose of this paper is to describe these capabilities and how they came to be through a series of lessons learned while developing autonomous agents for this domain.},
  keywords  = {CogArch, Cognitive Architecture, DTIC, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Human-like behavior, alas, demands human-like intellect Proceedings Article
In: Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents, Barcelona, Spain, 2000.
Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{gratch_human-like_2000,
  title     = {Human-like behavior, alas, demands human-like intellect},
  author    = {Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Human-like%20behavior%20alas%20demands%20human-like%20intellect.pdf},
  year      = {2000},
  date      = {2000-06-01},
  booktitle = {Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents},
  address   = {Barcelona, Spain},
  keywords  = {DTIC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Proceedings Article
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{kim_how_2000,
  title     = {How Long Can an Agent Look Away From a Target?},
  author    = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
  year      = {2000},
  date      = {2000-05-01},
  booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
  abstract  = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no one theory for SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot, who visually tracks the location and direction of several vehicles that he cannot see simultaneously, must shift its visual field of view to scan the environment and to sense the situation involved. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. In SA, it is important for a virtual pilot having a limited visual field of view to gather more information from its environment and to choose appropriate actions to take in the environment without losing the target.},
  keywords  = {DTIC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Socially Situated Planning Book Section
In: Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations, vol. 3, pp. 181–188, AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA, 2000.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@incollection{gratch_socially_2000,
  title     = {Socially Situated Planning},
  author    = {Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Socially%20Situated%20Planning.pdf},
  year      = {2000},
  date      = {2000-01-01},
  booktitle = {Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations},
  volume    = {3},
  pages     = {181–188},
  address   = {AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA},
  abstract  = {Introduction: Virtual environments such as training simulators and video games do an impressive job at modeling the physical dynamics of synthetic worlds but fall short when modeling the social dynamics of anything but the most impoverished human encounters. Yet the social dimension is at least as important as good graphics for creating an engaging game or effective training tool. Commercial flight simulators accurately model the technical aspects of flight but many aviation disasters arise from social breakdowns: poor management skills in the cockpit, or the effects of stress and emotion. Perhaps the biggest consumer of simulation technology, the U.S. military, identifies unrealistic human and organizational behavior as a major limitation of existing simulation technology (NRC, 1998). And of course the entertainment industry has long recognized the importance of good character, emotional attachment and rich social interactions to "put butts in seats." This article describes a research effort to endow virtual training environments with richer models of social behavior. We have been developing autonomous and semi-autonomous software agents that plan and act while situated in a social network of other entities, human and synthetic (Hill et. al, 1997; Tambe, 1997; Gratch and Hill, 1999). My work has focused on making agents act in an organization and obey social constraints, coordinate their behavior, negotiate conflicts, but also obey their own self-interest and show a range of individual differences in their behavior and willingness to violate social norms, albeit within the relatively narrow context of a specific training exercise.},
  keywords  = {DTIC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gratch, Jonathan
Modeling the Interplay Between Emotion and Decision-Making Proceedings Article
In: Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{gratch_modeling_2000,
  title     = {Modeling the Interplay Between Emotion and Decision-Making},
  author    = {Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20Between%20Emotion%20and%20Decision-Making.pdf},
  year      = {2000},
  date      = {2000-01-01},
  booktitle = {Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation},
  abstract  = {Current models of computer-generated forces are limited by their inability to model many of the moderators that influence the performance of real troops in the field such as the effects of stress, emotion, and individual differences. This article discusses an extension to our command and control modeling architecture that begins to address how behavioral moderators influence the command decision-making process. Our Soar-Cfor command architecture was developed under the STOW and ASTT programs to support distributed command and control decision-making in the domain of army aviation planning. We have recently extended this architecture to model how people appraise the emotional significance of events and how these events influence decision making.},
  keywords  = {DTIC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Émile: Marshalling Passions in Training and Education Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{gratch_emile_2000,
title = {Émile: Marshalling Passions in Training and Education},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
pages = {325–332},
address = {Barcelona, Spain},
abstract = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes Émile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
1999
Gratch, Jonathan; Hill, Randall W.
Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans
@inproceedings{gratch_continuous_1999,
  title     = {Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces},
  author    = {Jonathan Gratch and Randall W. Hill},
  url       = {http://ict.usc.edu/pubs/Continuous%20Planning%20and%20Collaboration%20for%20Command%20and%20Control%20in%20Joint%20Synthetic%20Battlespaces.pdf},
  year      = {1999},
  date      = {1999-05-01},
  booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
  address   = {Orlando, FL},
  abstract  = {In this paper we describe our efforts to model command and control entities for Joint Synthetic Battlespaces. Command agents require a broader repertoire of capabilities than is typically modeled in simulation. They must develop mission plans involving multiple subordinate units, monitor execution, dynamically modify mission plans in response to situational contingencies, collaborate with other decision makers, and deal with a host of organizational issues. We describe our approach to command agent modeling that addresses a number of these issues through its continuous and collaborative approach to mission planning.},
  keywords  = {DTIC, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Hill, Randall W.; Stone III, LTC George
Deriving Priority Intelligence Requirements for Synthetic Command Entities Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation, Virtual Humans
@inproceedings{gratch_deriving_1999,
title = {Deriving Priority Intelligence Requirements for Synthetic Command Entities},
author = {Jonathan Gratch and Stacy C. Marsella and Randall W. Hill and Stone, III, LTC George},
url = {http://ict.usc.edu/pubs/Deriving%20Priority%20Intelligence%20Requirements%20for%20Synthetic%20Command%20Entities.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {Simulation-based training is using increasingly complex synthetic forces. As more complex multiechelon synthetic forces are employed in simulations, the need for a realistic model of their command and control behavior becomes more urgent. In this paper we discuss one key component of such a model, the autonomous generation and use of priority intelligence requirements within multi-echelon plans.},
keywords = {DTIC, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Why You Should Buy an Emotional Planner Proceedings Article
In: Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures, 1999.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{gratch_why_1999,
title = {Why You Should Buy an Emotional Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Why%20You%20Should%20Buy%20an%20Emotional%20Planner.pdf},
year = {1999},
date = {1999-01-01},
booktitle = {Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures},
abstract = {Computation models of emotion have begun to address the problem of how agents arrive at a given emotional state, and how that state might alter their reactions to the environment. Existing work has focused on reactive models of behavior and does not, as of yet, provide much insight on how emotion might relate to the construction and execution of complex plans. This article focuses on this later question. I present a model of how agents appraise the emotion significance of events that illustrates a complementary relationship between classical planning methods and models of emotion processing. By building on classical planning methods, the model clarifies prior accounts of emotional appraisal and extends these accounts to handle the generation and execution of complex multi-agent plans.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Andersen, Carl F.; Chong, Waiyian; Josyula, Darsana; Okamoto, Yoshi; Purang, Khemdut; O'Donovan-Anderson, Michael; Perlis, Don
Representations of Dialogue State for Domain and Task Independent Meta-Dialogue Journal Article
In: Electronic Transactions on Artificial Intelligence, vol. 3, pp. 125–152, 1999.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@article{traum_representations_1999,
title = {Representations of Dialogue State for Domain and Task Independent Meta-Dialogue},
author = {David Traum and Carl F. Andersen and Waiyian Chong and Darsana Josyula and Yoshi Okamoto and Khemdut Purang and Michael O'Donovan-Anderson and Don Perlis},
url = {http://ict.usc.edu/pubs/Representations%20of%20Dialogue%20State%20for%20Domain%20and%20Task%20Independent%20Meta-Dialogue.pdf},
year = {1999},
date = {1999-01-01},
journal = {Electronic Transactions on Artificial Intelligence},
volume = {3},
pages = {125--152},
abstract = {We propose a representation of local dialogue context motivated by the need to react appropriately to meta-dialogue, such as various sorts of corrections to the sequence of an instruction and response action. Such contexts includes at least the following aspects: the words and linguistic structures uttered, the domain correlates of those linguistic structures, and plans and actions in response. Each of these is needed as part of the context in order to be able to correctly interpret the range of possible corrections. Partitioning knowledge of dialogue structure in this way may lead to an ability to represent generic dialogue structure (e.g., in the form of axioms), which can be particularized to the domain, topic and content of the dialogue.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
0000
Gratch, Jonathan
Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing Journal Article
In: pp. 9, 0000.
BibTeX | Tags: DTIC, Emotions, Virtual Humans
@article{gratch_emotion_nodate,
title = {Emotion recognition {$\neq$} Emotion Understanding: Challenges Confronting the Field of Affective Computing},
author = {Jonathan Gratch},
pages = {9},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
Abstract | BibTeX | Tags: DTIC, MedVR, Virtual Humans, VR
@article{hartholt_combat_nodate,
title = {From Combat to {COVID-19} -- Managing the Impact of Trauma Using {Virtual Reality}},
author = {Arno Hartholt and Sharon Mozgai},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
keywords = {DTIC, MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, UARC, Virtual Humans
@article{gervits_classication-based_nodate,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {ARL, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Rizzo, Albert A; Hartholt, Arno
Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application Proceedings Article
In: 0000.
Abstract | BibTeX | Tags: Virtual Humans, VR
@inproceedings{mozgai_persuasive_nodate,
title = {Persuasive Technology for Suicide Prevention: A Virtual Human {mHealth} Application},
author = {Sharon Mozgai and Albert A Rizzo and Arno Hartholt},
abstract = {We are demoing Battle Buddy, an mHealth application designed to support access to physical and mental wellness content as well as safety planning for U.S. military veterans. This virtual human interface will collect multimodal data through passive sensors native to popular wearables (e.g., Apple Watch) and deliver adaptive multimedia content specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Battle Buddy can deliver health interventions matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). All interactions were specifically designed to engage and motivate by employing the persuasive strategies of (1) personalization, (2) self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise.},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}