Publications
Search
Gratch, Jonathan; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli; Nichols, Jeffrey
GOAALLL!: Using Sentiment in the World Cup to Explore Theories of Emotion Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{gratch_goaalll_2015,
  title     = {{GOAALLL!}: Using Sentiment in the {World Cup} to Explore Theories of Emotion},
  author    = {Jonathan Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler and Jeffrey Nichols},
  url       = {http://ict.usc.edu/pubs/GOAALLL!%20Using%20Sentiment%20in%20the%20World%20Cup%20to%20Explore%20Theories%20of%20Emotion.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Sporting events evoke strong emotions amongst fans and thus act as natural laboratories to explore emotions and how they unfold in the wild. Computational tools, such as sentiment analysis, provide new ways to examine such dynamic emotional processes. In this article we use sentiment analysis to examine tweets posted during 2014 World Cup. Such analysis gives insight into how people respond to highly emotional events, and how these emotions are shaped by contextual factors, such as prior expectations, and how these emotions change as events unfold overtime. Here we report on some preliminary analysis of a World Cup twitter corpus using sentiment analysis techniques. We show these tools can give new insights into existing theories of what makes a sporting match exciting. This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion. We also discuss some challenges that such data present for existing sentiment analysis techniques and discuss future analysis.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale; Gratch, Jonathan; Scherer, Stefan; Boberg, Jill; Stratou, Giota
Towards an Affective Interface for Assessment of Psychological Distress Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{lucas_towards_2015,
  title     = {Towards an Affective Interface for Assessment of Psychological Distress},
  author    = {Gale Lucas and Jonathan Gratch and Stefan Scherer and Jill Boberg and Giota Stratou},
  url       = {http://ict.usc.edu/pubs/Towards%20an%20Affective%20Interface%20for%20Assessment%20of%20Psychological%20Distress.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Even with the rise in use of TeleMedicine for health care and mental health, research suggests that clinicians may have difficulty reading nonverbal cues in computer-mediated situations. However, the recent progress in tracking affective markers (i.e., displays of emotional expressions on face and in voice) has opened the door to new clinical applications that might help health care providers better read nonverbal behaviors when employing TeleMedicine. For example, an interface that automatically quantified affective markers could assist clinicians in their assessment of and treatment for psychological distress (i.e., symptoms of depression and PTSD). To move towards this prospect, we will show that clinicians’ judgments of these nonverbal affective markers (e.g., smile, frown, eye contact, tense voice) could be informed by such technology. The results of our evaluation suggest that clinicians’ ratings of nonverbal affective markers are less predictive of psychological distress than automatically quantified affective markers. Because such quantifications are more strongly associated with psychological distress than clinician ratings of these same nonverbal behaviors, an affective interface providing quantifications of nonverbal affective markers could potentially improve assessment of psychological distress.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chollet, Mathieu; Wortwein, Torsten; Morency, Louis-Philippe; Shapiro, Ari; Scherer, Stefan
Exploring Feedback Strategies to Improve Public Speaking: An Interactive Virtual Audience Framework Proceedings Article
In: Proceedings of UbiComp 2015, ACM, Osaka, Japan, 2015.
@inproceedings{chollet_exploring_2015,
  title     = {Exploring Feedback Strategies to Improve Public Speaking: An Interactive Virtual Audience Framework},
  author    = {Mathieu Chollet and Torsten Wortwein and Louis-Philippe Morency and Ari Shapiro and Stefan Scherer},
  url       = {http://ict.usc.edu/pubs/Exploring%20Feedback%20Strategies%20to%20Improve%20Public%20Speaking%20-%20An%20Interactive%20Virtual%20Audience%20Framework.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of UbiComp 2015},
  publisher = {ACM},
  address   = {Osaka, Japan},
  abstract  = {Good public speaking skills convey strong and effective communication, which is critical in many professions and used in everyday life. The ability to speak publicly requires a lot of training and practice. Recent technological developments enable new approaches for public speaking training that allow users to practice in a safe and engaging environment. We explore feedback strategies for public speaking training that are based on an interactive virtual audience paradigm. We investigate three study conditions: (1) a non-interactive virtual audience (control condition), (2) direct visual feedback, and (3) nonverbal feedback from an interactive virtual audience. We perform a threefold evaluation based on self-assessment questionnaires, expert assessments, and two objectively annotated measures of eye-contact and avoidance of pause fillers. Our experiments show that the interactive virtual audience brings together the best of both worlds: increased engagement and challenge as well as improved public speaking skills as judged by experts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hiraoka, Takuya; Georgila, Kallirroi; Nouri, Elnaz; Traum, David; Nakamura, Satoshi
Reinforcement Learning in Multi-Party Trading Dialog Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 32–41, Prague, Czech Republic, 2015.
@inproceedings{hiraoka_reinforcement_2015,
  title     = {Reinforcement Learning in Multi-Party Trading Dialog},
  author    = {Takuya Hiraoka and Kallirroi Georgila and Elnaz Nouri and David Traum and Satoshi Nakamura},
  url       = {http://ict.usc.edu/pubs/Reinforcement%20Learning%20in%20Multi-Party%20Trading%20Dialog.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {32--41},
  address   = {Prague, Czech Republic},
  abstract  = {In this paper, we apply reinforcement learning (RL) to a multi-party trading scenario where the dialog system (learner) trades with one, two, or three other agents. We experiment with different RL algorithms and reward functions. The negotiation strategy of the learner is learned through simulated dialog with trader simulators. In our experiments, we evaluate how the performance of the learner varies depending on the RL algorithm used and the number of traders. Our results show that (1) even in simple multi-party trading dialog tasks, learning an effective negotiation policy is a very hard problem; and (2) the use of neural fitted Q iteration combined with an incremental reward function produces negotiation policies as effective or even better than the policies of two strong hand-crafted baselines.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Eskenazi, Maxine; Black, Alan W.; Lee, Sungjin; Traum, David
THE REAL CHALLENGE 2014: PROGRESS AND PROSPECTS Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 209–216, 2015.
@inproceedings{eskenazi_real_2015,
  title     = {The {REAL} Challenge 2014: Progress and Prospects},
  author    = {Maxine Eskenazi and Alan W. Black and Sungjin Lee and David Traum},
  url       = {http://ict.usc.edu/pubs/THE%20REAL%20CHALLENGE%202014-PROGRESS%20AND%20PROSPECTS.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {209--216},
  abstract  = {The REAL Challenge took place for the first time in 2014, with a long term goal of creating streams of real data that the research community can use, by fostering the creation of systems that are capable of attracting real users. A novel approach is to have high school and undergraduate students devise the types of applications that would attract many real users and that need spoken interaction. The projects are presented to researchers from the spoken dialog research community and the researchers and students work together to refine and develop the ideas. Eleven projects were presented at the first workshop. Many of them have found mentors to help in the next stages of the projects. The students have also brought out issues in the use of speech for real applications. Those issues involve privacy and significant personalization of the applications. While long-term impact of the challenge remains to be seen, the challenge has already been a success at its immediate aims of bringing new ideas and new researchers into the community, and serves as a model for related outreach efforts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Scherer, Stefan; Morency, Louis-Philippe
A Multi-label Convolutional Neural Network Approach to Cross-Domain Action Unit Detection Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{ghosh_multi-label_2015,
  title     = {A Multi-label Convolutional Neural Network Approach to Cross-Domain Action Unit Detection},
  author    = {Sayan Ghosh and Eugene Laksana and Stefan Scherer and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/A%20Multi-label%20Convolutional%20Neural%20Network%20Approach%20to%20Cross-Domain%20Action%20Unit%20Detection.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Action Unit (AU) detection from facial images is an important classification task in affective computing. However most existing approaches use carefully engineered feature extractors along with off-the-shelf classifiers. There has also been less focus on how well classifiers generalize when tested on different datasets. In our paper, we propose a multi-label convolutional neural network approach to learn a shared representation between multiple AUs directly from the input image. Experiments on three AU datasets- CK+, DISFA and BP4D indicate that our approach obtains competitive results on all datasets. Cross-dataset experiments also indicate that the network generalizes well to other datasets, even when under different training and testing conditions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
People Show Envy, Not Guilt, when Making Decisions with Machines Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{de_melo_people_2015,
  title     = {People Show Envy, Not Guilt, when Making Decisions with Machines},
  author    = {Celso M. de Melo and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/People%20Show%20Envy,%20Not%20Guilt,%20when%20Making%20Decisions%20with%20Machines.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Research shows that people consistently reach more efficient solutions than those predicted by standard economic models, which assume people are selfish. Artificial intelligence, in turn, seeks to create machines that can achieve these levels of efficiency in human-machine interaction. However, as reinforced in this paper, people’s decisions are systematically less efficient – i.e., less fair and favorable – with machines than with humans. To understand the cause of this bias, we resort to a wellknown experimental economics model: Fehr and Schmidt’s inequity aversion model. This model accounts for people’s aversion to disadvantageous outcome inequality (envy) and aversion to advantageous outcome inequality (guilt). We present an experiment where participants engaged in the ultimatum and dictator games with human or machine counterparts. By fitting this data to Fehr and Schmidt’s model, we show that people acted as if they were just as envious of humans as of machines; but, in contrast, people showed less guilt when making unfavorable decisions to machines. This result, thus, provides critical insight into this bias people show, in economic settings, in favor of humans. We discuss implications for the design of machines that engage in social decision making with humans.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wortwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
Automatic Assessment and Analysis of Public Speaking Anxiety: A Virtual Audience Case Study Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{wortwein_automatic_2015,
  title     = {Automatic Assessment and Analysis of Public Speaking Anxiety: A Virtual Audience Case Study},
  author    = {Torsten Wortwein and Louis-Philippe Morency and Stefan Scherer},
  url       = {http://ict.usc.edu/pubs/Automatic%20Assessment%20and%20Analysis%20of%20Public%20Speaking%20Anxiety%20-%20A%20Virtual%20Audience%20Case%20Study.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Public speaking has become an integral part of many professions and is central to career building opportunities. Yet, public speaking anxiety is often referred to as the most common fear in everyday life and can hinder one’s ability to speak in public severely. While virtual and real audiences have been successfully utilized to treat public speaking anxiety in the past, little work has been done on identifying behavioral characteristics of speakers suffering from anxiety. In this work, we focus on the characterization of behavioral indicators and the automatic assessment of public speaking anxiety. We identify several indicators for public speaking anxiety, among them are less eye contact with the audience, reduced variability in the voice, and more pauses. We automatically assess the public speaking anxiety as reported by the speakers through a self-assessment questionnaire using a speaker independent paradigm. Our approach using ensemble trees achieves a high correlation between ground truth and our estimation (r=0.825). Complementary to automatic measures of anxiety, we are also interested in speakers’ perceptual differences when interacting with a virtual audience based on their level of anxiety in order to improve and further the development of virtual audiences for the training of public speaking and the reduction of anxiety.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Gratch, Jonathan; Rosenfeld, Avi
Saying YES! The Cross-cultural Complexities of Favors and Trust in Human-Agent Negotiation Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{mell_saying_2015,
  title     = {Saying {YES!} The Cross-cultural Complexities of Favors and Trust in Human-Agent Negotiation},
  author    = {Johnathan Mell and Gale Lucas and Jonathan Gratch and Avi Rosenfeld},
  url       = {http://ict.usc.edu/pubs/Saying%20YES!%20The%20Cross-cultural%20Complexities%20of%20Favors%20and%20Trust%20in%20Human-Agent%20Negotiation.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Negotiation between virtual agents and humans is a complex field that requires designers of systems to be aware not only of the efficient solutions to a given game, but also the mechanisms by which humans create value over multiple negotiations. One way of considering the agent’s impact beyond a single negotiation session is by considering the use of external “ledgers” across multiple sessions. We present results that describe the effects of favor exchange on negotiation outcomes, fairness, and trust for two distinct cross-cultural populations, and illustrate the ramifications of their similarities and differences on virtual agent design.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stratou, Giota; Hoegen, Rens; Lucas, Gale; Gratch, Jonathan
Emotional Signaling in a Social Dilemma: an Automatic Analysis Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{stratou_emotional_2015,
  title     = {Emotional Signaling in a Social Dilemma: an Automatic Analysis},
  author    = {Giota Stratou and Rens Hoegen and Gale Lucas and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Emotional%20Signaling%20in%20a%20Social%20Dilemma-an%20Automatic%20Analysis.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Emotional signaling plays an important role in negotiations and other social decision-making tasks as it can signal intention and shape joint decisions. Specifically it has been shown to influence cooperation or competition. This has been shown in previous studies for scripted interactions that control emotion signaling and rely on manual coding of affect. In this work we examine face-to-face interactions in an iterative social dilemma task (prisoner’s dilemma) via an automatic framework for facial expression analysis. We explore if automatic analysis of emotion can give insight into the social function of emotion in face-toface interactions. Our analysis suggests that positive and negative displays of emotion are associated with more prosocial and proself game acts respectively. Moreover signaling cooperative intentions to the opponent via positivity can leave participants more open to exploitation, whereas signaling a more tough stance via negativity seems to discourage exploitation. However, the benefit of negative affect is short-term and both players do worse over time if they show negative emotions},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nazari, Zahra; Lucas, Gale; Gratch, Jonathan
Multimodal Approach for Automatic Recognition of Machiavellianism Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{nazari_multimodal_2015,
  title     = {Multimodal Approach for Automatic Recognition of Machiavellianism},
  author    = {Zahra Nazari and Gale Lucas and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Multimodal%20Approach%20for%20Automatic%20Recognition%20of%20Machiavellianism.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Machiavellianism, by definition, is the tendency to use other people as a tool to achieve one's own goals. Despite the large focus on the Big Five traits of personality, this anti-social trait is relatively unexplored in the computational realm. Automatically recognizing anti-social traits can have important uses across a variety of applications. In this paper, we use negotiation as a setting that provides Machiavellians with the opportunity to reveal their exploitative inclinations. We use textual, visual, acoustic, and behavioral cues to automatically predict High vs. Low Machiavellian personalities. These learned models have good accuracy when compared with other personalityrecognition methods, and we provide evidence that the automatically-learned models are consistent with existing literature on this anti-social trait, giving evidence that these results can generalize to other domains.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{gratch_appraisal_2015,
  title     = {The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics},
  author    = {Jonathan Gratch and Lin Cheng and Stacy Marsella},
  url       = {http://ict.usc.edu/pubs/The%20Appraisal%20Equivalence%20Hypothesis-Verifying%20the%20domain-independence%20of%20a%20computational%20model%20of%20emotion%20dynamics.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of ACII 2015},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {Appraisal theory is the most influential theory within affective computing, and serves as the basis for several computational models of emotion. The theory makes strong claims of domain-independence: seemingly different situations, both within and across domains are claimed to produce the identical emotional responses if and only if they are appraised the same way. This article tests this claim, and the predictions of a computational model that embodies it, in two very different interactive games. The results extend prior empirical evidence for appraisal theory to situations where emotions unfold and change over time.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Georgila, Kallirroi; Artstein, Ron; Leuski, Anton
Evaluating Spoken Dialogue Processing for Time-Offset Interaction Proceedings Article
In: Proceedings of 16th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL), pp. 199–208, Association for Computational Linguistics, Prague, Czech Republic, 2015, ISBN: 978-1-941643-75-4.
@inproceedings{traum_evaluating_2015,
  title     = {Evaluating Spoken Dialogue Processing for Time-Offset Interaction},
  author    = {David Traum and Kallirroi Georgila and Ron Artstein and Anton Leuski},
  url       = {http://ict.usc.edu/pubs/Evaluating%20Spoken%20Dialogue%20Processing%20for%20Time-Offset%20Interaction.pdf},
  isbn      = {978-1-941643-75-4},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of 16th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL)},
  pages     = {199--208},
  publisher = {Association for Computational Linguistics},
  address   = {Prague, Czech Republic},
  abstract  = {This paper presents the first evaluation of a full automated prototype system for time-offset interaction, that is, conversation between a live person and recordings of someone who is not temporally co-present. Speech recognition reaches word error rates as low as 5% with general purpose language models and 19% with domain-specific models, and language understanding can identify appropriate direct responses to 60–66% of user utterances while keeping errors to 10–16% (the remainder being indirect, or off-topic responses). This is sufficient to enable a natural flow and relatively open-ended conversations, with a collection of under 2000 recorded statements.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Spicer, Ryan; Evangelista, Edgar; New, Raymond; Campbell, Julia; Richmond, Todd; McGroarty, Christopher; Vogt, Brian
Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping Proceedings Article
In: Proceedings of the 2015 Simulation Interoperability Workshop, Orlando, FL, 2015.
@inproceedings{spicer_innovation_2015,
  title     = {Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping},
  author    = {Ryan Spicer and Edgar Evangelista and Raymond New and Julia Campbell and Todd Richmond and Christopher McGroarty and Brian Vogt},
  url       = {http://ict.usc.edu/pubs/Innovation%20and%20Rapid%20Evolutionary%20Design%20by%20Virtual%20Doing-Understanding%20Early%20Synthetic.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of the 2015 Simulation Interoperability Workshop},
  address   = {Orlando, FL},
  abstract  = {The proliferation and maturation of tools supporting virtual environments combined with emerging immersive capabilities (e.g. Oculus Rift and other head mounted displays) point towards the ability to take nascent ideas and realize them in engaging ways through an Early Synthetic Prototyping (ESP) system. In effect, “bend electrons before bending metal,” enabling Soldier (end-user) feedback early in the design process, while fostering an atmosphere of collaboration and innovation. Simulation has been used in a variety of ways for concept, design, and testing, but current methods do not put the user into the system in ways that provide deep feedback and enable a dialogue between Warfighter and Engineer (as well as other stakeholders) that can inform design. This paper will discuss how the process of ESP is teased out by using iterative rapid virtual prototyping based on an initial ESP schema, resulting in a rather organic design process – Innovation and Rapid Evolutionary Design by Virtual Doing. By employing canonical use cases, working through the draft schema allows the system to help design itself and inform the process evolution. This type of self-referential meta-design becomes increasingly powerful and relevant given the ability to rapidly create assets, capabilities and environments that immerse developers, stakeholders, and end users early and often in the process. Specific examples of using rapid virtual prototyping for teasing out the design and implications/applications of ESP will be presented, walking through the evolution of both schema and prototypes with specific use cases. In addition, this paper will cover more generalized concepts, approaches, analytics, and lessons-learned as well as implications for innovation throughout research, development, and industry.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Stratou, Giota; Morency, Louis-Philippe; DeVault, David; Hartholt, Arno; Fast, Edward; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert
A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews Proceedings Article
In: Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on, pp. 787–789, IEEE, Xi'an, China, 2015.
@inproceedings{stratou_demonstration_2015,
  title     = {A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews},
  author    = {Giota Stratou and Louis-Philippe Morency and David DeVault and Arno Hartholt and Edward Fast and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert Rizzo},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7344661},
  doi       = {10.1109/ACII.2015.7344661},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on},
  pages     = {787--789},
  publisher = {IEEE},
  address   = {Xi'an, China},
  abstract  = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. With this demo, we focus our attention on the perception part of the system, a multimodal framework which captures and analyzes user state behavior for both behavioral understanding and interactional purposes. We will demonstrate real-time user state sensing as a part of the SimSensei architecture and discuss how this technology enabled automatic analysis of behaviors related to psychological distress.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pincus, Eli; Georgila, Kallirroi; Traum, David
Which Synthetic Voice Should I Choose for an Evocative Task? Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 105–113, Prague, Czech Republic, 2015.
@inproceedings{pincus_which_2015,
  title     = {Which Synthetic Voice Should I Choose for an Evocative Task?},
  author    = {Eli Pincus and Kallirroi Georgila and David Traum},
  url       = {http://ict.usc.edu/pubs/Which%20Synthetic%20Voice%20Should%20I%20Choose%20for%20an%20Evocative%20Task.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {105--113},
  address   = {Prague, Czech Republic},
  abstract  = {We explore different evaluation methods for 4 different synthetic voices and 1 human voice. We investigate whether intelligibility, naturalness, or likability of a voice is correlated to the voice’s evocative function potential, a measure of the voice’s ability to evoke an intended reaction from the listener. We also investigate the extent to which naturalness and likability ratings vary depending on whether or not exposure to a voice is extended and continuous vs. short-term and sporadic (interleaved with other voices). Finally, we show that an automatic test can replace the standard intelligibility tests for text-to-speech (TTS) systems, which eliminates the need to hire humans to perform transcription tasks saving both time and money.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Paetzel, Maike; Manuvinakurike, Ramesh; DeVault, David
"So, which one is it?" The effect of alternative incremental architectures in a high-performance game-playing agent Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 77 – 86, Prague, Czech Republic, 2015.
@inproceedings{paetzel_so_2015,
  title     = {"So, which one is it?" The effect of alternative incremental architectures in a high-performance game-playing agent},
  author    = {Maike Paetzel and Ramesh Manuvinakurike and David DeVault},
  url       = {http://ict.usc.edu/pubs/So,%20which%20one%20is%20it%20-%20The%20effect%20of%20alternative%20incremental%20architectures%20in%20a%20high-performance%20game-playing%20agent.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of SIGDIAL 2015},
  pages     = {77--86},
  address   = {Prague, Czech Republic},
  abstract  = {This paper introduces Eve, a high-performance agent that plays a fast-paced image matching game in a spoken dialogue with a human partner. The agent can be optimized and operated in three different modes of incremental speech processing that optionally include incremental speech recognition, language understanding, and dialogue policies. We present our framework for training and evaluating the agent’s dialogue policies. In a user study involving 125 human participants, we evaluate three incremental architectures against each other and also compare their performance to human-human gameplay. Our study reveals that the most fully incremental agent achieves game scores that are comparable to those achieved in human-human gameplay, are higher than those achieved by partially and non-incremental versions, and are accompanied by improved user perceptions of efficiency, understanding of speech, and naturalness of interaction.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Papangelis, Alexandros; Georgila, Kallirroi
Reinforcement learning of multi-issue negotiation dialogue policies Proceedings Article
In: Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 154–158, Association for Computational Linguistics, Prague, Czech Republic, 2015.
@inproceedings{papangelis_reinforcement_2015,
  title     = {Reinforcement learning of multi-issue negotiation dialogue policies},
  author    = {Alexandros Papangelis and Kallirroi Georgila},
  url       = {http://www.aclweb.org/anthology/W15-4621},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  pages     = {154--158},
  publisher = {Association for Computational Linguistics},
  address   = {Prague, Czech Republic},
  abstract  = {We use reinforcement learning (RL) to learn a multi-issue negotiation dialogue policy. For training and evaluation, we build a hand-crafted agenda-based policy, which serves as the negotiation partner of the RL policy. Both the agendabased and the RL policies are designed to work for a large variety of negotiation settings, and perform well against negotiation partners whose behavior has not been observed before. We evaluate the two models by having them negotiate against each other under various settings. The learned model consistently outperforms the agenda-based model. We also ask human raters to rate negotiation transcripts between the RL policy and the agenda-based policy, regarding the rationality of the two negotiators. The RL policy is perceived as more rational than the agenda-based policy.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
@inproceedings{feng_platform_2015,
  title     = {A Platform for Building Mobile Virtual Humans},
  author    = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
  url       = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {310--319},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Melo, Celso; Gratch, Jonathan
Beyond Believability: Quantifying the Differences Between Real and Virtual Humans Proceedings Article
In: Proceedings of IVA 2015, pp. 109–118, Springer, Delft, Netherlands, 2015.
@inproceedings{de_melo_beyond_2015,
  title     = {Beyond Believability: Quantifying the Differences Between Real and Virtual Humans},
  author    = {Celso Melo and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Beyond%20Believability%20-%20Quantifying%20the%20Differences%20between%20Real%20and%20Virtual%20Humans.pdf},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of IVA 2015},
  pages     = {109--118},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {“Believable” agents are supposed to “suspend the audience’s disbelief” and provide the “illusion of life”. However, beyond such high-level definitions, which are prone to subjective interpretation, there is not much more to help researchers systematically create or assess whether their agents are believable. In this paper we propose a more pragmatic and useful benchmark than believability for designing virtual agents. This benchmark requires people, in a specific social situation, to act with the virtual agent in the same manner as they would with a real human. We propose that perceptions of mind in virtual agents, especially pertaining to agency – the ability to act and plan – and experience – the ability to sense and feel emotion – are critical for achieving this new benchmark. We also review current computational systems that fail, pass, and even surpass this benchmark and show how a theoretical framework based on perceptions of mind can shed light into these systems. We also discuss a few important cases where it is better if virtual humans do not pass the benchmark. We discuss implications for the design of virtual agents that can be as natural and efficient to interact with as real humans.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2004
Traum, David; Robinson, Susan; Stephan, Jens
Evaluation of multi-party virtual reality dialogue interaction Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_evaluation_2004,
  title     = {Evaluation of multi-party virtual reality dialogue interaction},
  author    = {David Traum and Susan Robinson and Jens Stephan},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20multi-party%20virtual%20reality%20dialogue%20interaction.pdf},
  year      = {2004},
  date      = {2004-05-01},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  abstract  = {We describe a dialogue evaluation plan for a multi-character virtual reality training simulation. A multi-component evaluation plan is presented, including user satisfaction, intended task completion, recognition rate, and a new annotation scheme for appropriateness. Preliminary results for formative tests are also presented.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Marsella, Stacy C.; Gratch, Jonathan
Emotion and Dialogue in the MRE Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 117–127, Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_emotion_2004,
  title     = {Emotion and Dialogue in the {MRE} Virtual Humans},
  author    = {David Traum and Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Emotion%20and%20Dialogue%20in%20the%20MRE%20Virtual%20Humans.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {3068},
  pages     = {117--127},
  address   = {Kloster Irsee, Germany},
  abstract  = {We describe the emotion and dialogue aspects of the virtual agents used in the MRE project at USC. The models of emotion and dialogue started independently, though each makes crucial use of a central task model. In this paper we describe the task model, dialogue model, and emotion model, and the interactions between them.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Hyeok-Soo; Gratch, Jonathan
A Planner-Independent Collaborative Planning Assistant Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 766–773, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_planner-independent_2004,
  title     = {A Planner-Independent Collaborative Planning Assistant},
  author    = {Hyeok-Soo Kim and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/A%20Planner-Independent%20Collaborative%20Planning%20Assistant.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {2},
  pages     = {766--773},
  address   = {New York, NY},
  abstract  = {This article introduces a novel approach to the problem of collaborative planning. We present a method that takes classical one-shot planning techniques - that take a fixed set of goals, initial state, and a domain theory - and adapts them to support the incremental, hierarchical and exploratory nature of collaborative planning that occurs between human planners, and that multi-agent planning systems attempt to support. This approach is planner-independent - in that it could be applied to any classical planning technique - and recasts the problem of collaborative planning as a search through a space of possible inputs to a classical planning system. This article outlines the technique and describes its application to the Mission Rehearsal Exercise, a multi-agent training system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Decision-Theoretic Approach to Plan Recognition Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2004, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_decision-theoretic_2004,
  title       = {Decision-Theoretic Approach to Plan Recognition},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/Decision-Theoretic%20Approach%20to%20Plan%20Recognition.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 01.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {In this report, first we give a survey of the work in plan recognition field, including the evolution of different approaches, their strength and weaknesses. Then we propose two decision-theoretic approaches to plan recognition problem, which explicitly take outcome utilities into consideration. One is an extension within the probabilistic reasoning framework, by adding utility nodes to belief nets. The other is based on maximizing the estimated expected utility of possible plan. Illustrative examples are given to explain the approaches. Finally, we compare the two approaches presented in the report and summarize the work.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gratch, Jonathan; Marsella, Stacy C.
Technical Details of a Domain-independent Framework for Modeling Emotion Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 04.2004, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{gratch_technical_2004,
  title       = {Technical Details of a Domain-independent Framework for Modeling Emotion},
  author      = {Jonathan Gratch and Stacy C. Marsella},
  url         = {http://ict.usc.edu/pubs/Technical%20Details%20of%20a%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 04.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {This technical report elaborates on the technical details of the EMA model of emotional appraisal and coping. It should be seen as an appendix to the journal article on this topic (Gratch \& Marsella, to appear)},
  keywords    = {Social Simulation, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Traum, David
Issues in Multiparty Dialogues Journal Article
In: Advances in Agent Communication, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{traum_issues_2004,
  title     = {Issues in Multiparty Dialogues},
  author    = {David Traum},
  editor    = {F. Dignum},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20Multiparty%20Dialogues.pdf},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Advances in Agent Communication},
  abstract  = {This article examines some of the issues in representation of, processing, and automated agent participation in natural language dialogue, considering expansion from two-party dialogue to multi-party dialogue. These issues include some regarding the roles agents play in dialogue, interactive factors, and content management factors.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Mao, Wenji; Gratch, Jonathan
Social Judgment in Multiagent Interactions Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 210–217, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2004,
  title     = {Social Judgment in Multiagent Interactions},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Social%20Judgment%20in%20Multiagent%20Interactions.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {1},
  pages     = {210--217},
  address   = {New York, NY},
  abstract  = {Social judgment is a process of social explanation whereby one evaluates which entities deserve credit or blame for multi-agent activities. Such explanations are a key aspect of inference in a social environment and a model of this process can advance several design components of multi-agent systems. Social judgment underlies social planning, social learning, natural language pragmatics and computational model of emotion. Based on psychological attribution theory, this paper presents a computational approach to forming social judgment based on an agent's causal knowledge and communicative interactions with other agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
A Domain-independent Framework for Modeling Emotion Journal Article
In: Journal of Cognitive Systems Research, vol. 5, no. 4, pp. 269–306, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_domain-independent_2004,
  title     = {A Domain-independent Framework for Modeling Emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/A%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Journal of Cognitive Systems Research},
  volume    = {5},
  number    = {4},
  pages     = {269--306},
  abstract  = {In this article, we show how psychological theories of emotion shed light on the interaction between emotion and cognition, and thus can inform the design of human-like autonomous agents that must convey these core aspects of human behavior. We lay out a general computational framework of appraisal and coping as a central organizing principle for such systems. We then discuss a detailed domain-independent model based on this framework, illustrating how it has been applied to the problem of generating behavior for a significant social training application. The model is useful not only for deriving emotional state, but also for informing a number of the behaviors that must be modeled by virtual humans such as facial expressions, dialogue management, planning, reacting, and social understanding. Thus, the work is of potential interest to models of strategic decision-making, action selection, facial animation, and social intelligence.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Robinson, Susan; Martinovski, Bilyana; Garg, Saurabh; Stephan, Jens; Traum, David
Issues in corpus development for multi-party multi-modal task-oriented dialogue Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_issues_2004,
  title     = {Issues in corpus development for multi-party multi-modal task-oriented dialogue},
  author    = {Susan Robinson and Bilyana Martinovski and Saurabh Garg and Jens Stephan and David Traum},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20corpus%20development%20for%20multi-party%20multi-modal%20task-oriented%20dialogue.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  abstract  = {This paper describes the development of a multi-modal corpus based on multi-party multi-task driven common goal oriented spoken language interaction. The data consists of approximately 10 hours of audio human simulation radio data and nearly 5 hours of video and audio face-to-face sessions between human trainees and virtual agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narayanan_transonics_2003,
  title     = {{Transonics}: A Speech to Speech System for {English-Persian} Interactions},
  author    = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  year      = {2003},
  date      = {2003-12-01},
  booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
  address   = {U.S. Virgin Islands},
  abstract  = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Proceedings Article
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_functions_2003,
  title     = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
  author    = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
  url       = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
  year      = {2003},
  date      = {2003-09-01},
  booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
  address   = {Saarbruecken Germany},
  abstract  = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker, (e.g., military radio communications). However, explicit guidelines, such as provided by the military are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Proceedings Article
In: Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hill_virtual_2003,
  title     = {Virtual Humans in the Mission Rehearsal Exercise System},
  author    = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
  year      = {2003},
  date      = {2003-06-01},
  booktitle = {Künstliche Intelligenz ({KI}) (special issue on Embodied Conversational Agents)},
  abstract  = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Fight the Way You Train: The Role and Limits of Emotions in Training for Combat Journal Article
In: Brown Journal of World Affairs, vol. X, pp. 63–76, 2003.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_fight_2003,
  title     = {Fight the Way You Train: The Role and Limits of Emotions in Training for Combat},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Fight%20the%20Way%20You%20Train-The%20Role%20and%20Limits%20of%20Emotions%20in%20Training%20for%20Combat.pdf},
  year      = {2003},
  date      = {2003-06-01},
  journal   = {Brown Journal of World Affairs},
  volume    = {X},
  pages     = {63--76},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Proceedings Article
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_nl_2003,
  title     = {{NL} Generation for Virtual Humans in a Complex Social Environment},
  author    = {David Traum and Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
  pages     = {151--158},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
  title     = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
  author    = {Bilyana Martinovski and David Traum},
  url       = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
  address   = {Switzerland},
  abstract  = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Larsson, Staffan
The Information State Approach to Dialogue Management Book Section
In: Current and New Directions in Discourse and Dialogue, pp. 325–353, 2003.
Links | BibTeX | Tags: Virtual Humans
@incollection{traum_information_2003,
  title     = {The Information State Approach to Dialogue Management},
  author    = {David Traum and Staffan Larsson},
  url       = {http://ict.usc.edu/pubs/The%20Information%20State%20Approach%20to%20Dialogue%20Management.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Current and New Directions in Discourse and Dialogue},
  pages     = {325--353},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Proceedings Article
In: International Workshop on Computational Semantics, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_semantics_2003,
  title     = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
  author    = {David Traum},
  url       = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Computational Semantics},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2003,
  title     = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {313--320},
  address   = {Melbourne, Australia},
  abstract  = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_negotiation_2003,
  title     = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
  author    = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {441--448},
  address   = {Melbourne, Australia},
  abstract  = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem (Extended Version) Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2003, 2003.
Links | BibTeX | Tags: Virtual Humans
@techreport{mao_social_2003,
  title       = {The Social Credit Assignment Problem (Extended Version)},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT%20TR%2002%202003.pdf},
  year        = {2003},
  date        = {2003-01-01},
  number      = {ICT TR 02 2003},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Proceedings Article
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_automating_2003,
  title     = {Automating After Action Review: Attributing Blame or Credit in Team Training},
  author    = {Jonathan Gratch and Wenji Mao},
  url       = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Scottsdale, AZ},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2003-1,
  title     = {The Social Credit Assignment Problem},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
  volume    = {2792},
  address   = {Kloster Irsee, Germany},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81--127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2002
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_embodied_2002,
  author    = {David Traum and Jeff Rickel},
  title     = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Bologna, Italy},
  year      = {2002},
  date      = {2002-07-01},
  url       = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Proceedings Article
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2002,
title = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
year = {2002},
date = {2002-06-01},
booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
address = {Orlando, FL},
abstract = {Recognizing and managing emotion in oneself and in those under one's command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hill_anticipating_2002,
title = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
author = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {821--827},
address = {Bologna, Italy},
abstract = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{rickel_toward_2002,
  author    = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
  title     = {Toward a New Generation of Virtual Humans for Interactive Experiences},
  journal   = {IEEE Intelligent Systems},
  year      = {2002},
  date      = {2002-01-01},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_step_2002,
title = {A step toward irrationality: using emotion to change belief},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {334--341},
address = {Bologna, Italy},
abstract = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Details of the CFOR Planner Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2002, 2002.
Links | BibTeX | Tags: Virtual Humans
@techreport{gratch_details_2002,
  author      = {Jonathan Gratch},
  title       = {Details of the CFOR Planner},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 01.2002},
  address     = {Marina del Rey, CA},
  year        = {2002},
  date        = {2002-01-01},
  url         = {http://ict.usc.edu/pubs/Details%20of%20the%20CFOR%20Planner.pdf},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{gratch_creating_2002,
title = {Creating Interactive Virtual Humans: Some Assembly Required},
author = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
url = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
pages = {54--63},
abstract = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_ideas_2002,
title = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2001
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_adaptive_2001,
title = {Adaptive narrative: How autonomous agents, {Hollywood}, and multiprocessing operating systems can live happily ever after},
author = {Jonathan Gratch and Jay Douglas},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
doi = {10.1007/3-540-45420-9_12},
isbn = {3-540-42611-6},
year = {2001},
date = {2001-10-01},
booktitle = {Proceedings of International Conference on Virtual Storytelling},
pages = {100--112},
address = {Avignon, France},
series = {Lecture Notes in Computer Science},
abstract = {Interacting Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN provide a strong basis for narrative coherence in the context of interactivity, HSP offer additional flexibility and the generation of stories and the mechanisms for generating comic situations.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
  author    = {Jay Douglas and Jonathan Gratch},
  title     = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  year      = {2001},
  date      = {2001-06-01},
  url       = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
  abstract  = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
  author    = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
  title     = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  year      = {2001},
  date      = {2001-06-01},
  url       = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
  abstract  = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of realworld scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
  keywords  = {Social Simulation, Virtual Humans, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
title = {Modeling Emotions in the {Mission Rehearsal Exercise}},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
year = {2001},
date = {2001-05-01},
booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
pages = {457--466},
address = {Orlando, FL},
abstract = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational {MUD} Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Van Ess-Dykema, Carol; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in {Chinese} to {English} {MT} System},
author = {Olsen, Mari and Traum, David and Van Ess-Dykema, Carol and Weinberg, Amy},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization with the best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_tears_2001,
title = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {278--285},
address = {Montreal, Canada},
abstract = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al.'s IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision- making skills in highly evocative situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{damiano_anticipatory_2001,
  author    = {Rossana Damiano and David Traum},
  title     = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
  booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
  year      = {2001},
  date      = {2001-01-01},
  url       = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Proceedings Article
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2001,
  author    = {Stacy C. Marsella and Jonathan Gratch},
  title     = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
  booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
  address   = {Edinburgh, Scotland},
  year      = {2001},
  date      = {2001-01-01},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
  abstract  = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2000
Gratch, Jonathan
Human-like behavior, alas, demands human-like intellect Proceedings Article
In: Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents, Barcelona, Spain, 2000.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_human-like_2000,
  author    = {Jonathan Gratch},
  title     = {Human-like behavior, alas, demands human-like intellect},
  booktitle = {Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents},
  address   = {Barcelona, Spain},
  year      = {2000},
  date      = {2000-06-01},
  url       = {http://ict.usc.edu/pubs/Human-like%20behavior%20alas%20demands%20human-like%20intellect.pdf},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Hill, Randall W.; Gratch, Jonathan; Rosenbloom, Paul
Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Social Simulation, Virtual Humans
@inproceedings{hill_flexible_2000,
  author    = {Randall W. Hill and Jonathan Gratch and Paul Rosenbloom},
  title     = {Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces},
  booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
  address   = {Barcelona, Spain},
  year      = {2000},
  date      = {2000-06-01},
  url       = {http://ict.usc.edu/pubs/Flexible%20Group%20Behavior-%20Virtual%20Commanders%20for%20Synthetic%20Battlespaces.pdf},
  abstract  = {This paper describes a project to develop autonomous commander agents for synthetic battlespaces. The commander agents plan missions, monitor their execution, and replan when necessary. To reason about the social aspects of group behavior, the commanders take various social stances that enable them to collaborate with friends, exercise or defer to authority, and thwart their foes. The purpose of this paper is to describe these capabilities and how they came to be through a series of lessons learned while developing autonomous agents for this domain.},
  keywords  = {CogArch, Cognitive Architecture, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Proceedings Article
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_how_2000,
  author    = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
  title     = {How Long Can an Agent Look Away From a Target?},
  booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
  year      = {2000},
  date      = {2000-05-01},
  url       = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
  abstract  = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no one theory for SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot, who visually tracks the location and direction of several vehicles that he cannot see simultaneously, must shift its visual field of view to scan the environment and to sense the situation involved. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. In SA, it is important for a virtual pilot having a limited visual field of view to gather more information from its environment and to choose appropriate actions to take in the environment without losing the target.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Modeling the Interplay Between Emotion and Decision-Making Proceedings Article
In: Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_modeling_2000,
  author    = {Jonathan Gratch},
  title     = {Modeling the Interplay Between Emotion and Decision-Making},
  booktitle = {Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation},
  year      = {2000},
  date      = {2000-01-01},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20Between%20Emotion%20and%20Decision-Making.pdf},
  abstract  = {Current models of computer-generated forces are limited by their inability to model many of the moderators that influence the performance of real troops in the field such as the effects of stress, emotion, and individual differences. This article discusses an extension to our command and control modeling architecture that begins to address how behavioral moderators influence the command decision-making process. Our Soar-Cfor command architecture was developed under the STOW and ASTT programs to support distributed command and control decision-making in the domain of army aviation planning. We have recently extended this architecture to model how people appraise the emotional significance of events and how these events influence decision making.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Socially Situated Planning Book Section
In: Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations, vol. 3, pp. 181–188, AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{gratch_socially_2000,
title = {Socially Situated Planning},
author = {Gratch, Jonathan},
url = {http://ict.usc.edu/pubs/Socially%20Situated%20Planning.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations},
volume = {3},
pages = {181--188},
address = {AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA},
abstract = {Introduction: Virtual environments such as training simulators and video games do an impressive job at modeling the physical dynamics of synthetic worlds but fall short when modeling the social dynamics of anything but the most impoverished human encounters. Yet the social dimension is at least as important as good graphics for creating an engaging game or effective training tool. Commercial flight simulators accurately model the technical aspects of flight but many aviation disasters arise from social breakdowns: poor management skills in the cockpit, or the effects of stress and emotion. Perhaps the biggest consumer of simulation technology, the U.S. military, identifies unrealistic human and organizational behavior as a major limitation of existing simulation technology (NRC, 1998). And of course the entertainment industry has long recognized the importance of good character, emotional attachment and rich social interactions to "put butts in seats." This article describes a research effort to endow virtual training environments with richer models of social behavior. We have been developing autonomous and semi-autonomous software agents that plan and act while situated in a social network of other entities, human and synthetic (Hill et. al, 1997; Tambe, 1997; Gratch and Hill, 1999). My work has focused on making agents act in an organization and obey social constraints, coordinate their behavior, negotiate conflicts, but also obey their own self-interest and show a range of individual differences in their behavior and willingness to violate social norms, albeit within the relatively narrow context of a specific training exercise.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gratch, Jonathan
Èmile: Marshalling Passions in Training and Education Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_emile_2000,
title = {{\`E}mile: Marshalling Passions in Training and Education},
author = {Gratch, Jonathan},
url = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
pages = {325--332},
address = {Barcelona, Spain},
abstract = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes {\`E}mile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
1999
Gratch, Jonathan; Marsella, Stacy C.; Hill, Randall W.; Stone III, LTC George
Deriving Priority Intelligence Requirements for Synthetic Command Entities Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_deriving_1999,
title = {Deriving Priority Intelligence Requirements for Synthetic Command Entities},
author = {Gratch, Jonathan and Marsella, Stacy C. and Hill, Randall W. and Stone, III, LTC George},
url = {http://ict.usc.edu/pubs/Deriving%20Priority%20Intelligence%20Requirements%20for%20Synthetic%20Command%20Entities.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {Simulation-based training is using increasingly complex synthetic forces. As more complex multiechelon synthetic forces are employed in simulations, the need for a realistic model of their command and control behavior becomes more urgent. In this paper we discuss one key component of such a model, the autonomous generation and use of priority intelligence requirements within multi-echelon plans.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Randall W.
Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_continuous_1999,
  author    = {Jonathan Gratch and Randall W. Hill},
  title     = {Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces},
  booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
  year      = {1999},
  date      = {1999-05-01},
  address   = {Orlando, FL},
  url       = {http://ict.usc.edu/pubs/Continuous%20Planning%20and%20Collaboration%20for%20Command%20and%20Control%20in%20Joint%20Synthetic%20Battlespaces.pdf},
  abstract  = {In this paper we describe our efforts to model command and control entities for Joint Synthetic Battlespaces. Command agents require a broader repertoire of capabilities than is typically modeled in simulation. They must develop mission plans involving multiple subordinate units, monitor execution, dynamically modify mission plans in response to situational contingencies, collaborate with other decision makers, and deal with a host of organizational issues. We describe our approach to command agent modeling that addresses a number of these issues through its continuous and collaborative approach to mission planning.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Andersen, Carl F.; Chong, Waiyian; Josyula, Darsana; Okamoto, Yoshi; Purang, Khemdut; O'Donovan-Anderson, Michael; Perlis, Don
Representations of Dialogue State for Domain and Task Independent Meta-Dialogue Journal Article
In: Electronic Transactions on Artificial Intelligence, vol. 3, pp. 125–152, 1999.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{traum_representations_1999,
title = {Representations of Dialogue State for Domain and Task Independent Meta-Dialogue},
author = {Traum, David and Andersen, Carl F. and Chong, Waiyian and Josyula, Darsana and Okamoto, Yoshi and Purang, Khemdut and O'Donovan-Anderson, Michael and Perlis, Don},
url = {http://ict.usc.edu/pubs/Representations%20of%20Dialogue%20State%20for%20Domain%20and%20Task%20Independent%20Meta-Dialogue.pdf},
year = {1999},
date = {1999-01-01},
journal = {Electronic Transactions on Artificial Intelligence},
volume = {3},
pages = {125--152},
abstract = {We propose a representation of local dialogue context motivated by the need to react appropriately to meta-dialogue, such as various sorts of corrections to the sequence of an instruction and response action. Such contexts includes at least the following aspects: the words and linguistic structures uttered, the domain correlates of those linguistic structures, and plans and actions in response. Each of these is needed as part of the context in order to be able to correctly interpret the range of possible corrections. Partitioning knowledge of dialogue structure in this way may lead to an ability to represent generic dialogue structure (e.g., in the form of axioms), which can be particularized to the domain, topic and content of the dialogue.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}