Publications
Choi, Ahyoung; Melo, Celso M.; Khooshabeh, Peter; Woo, Woontack; Gratch, Jonathan
Physiological evidence for a dual process model of the social effects of emotion in computers Journal Article
In: International Journal of Human-Computer Studies, vol. 74, pp. 41–53, 2015, ISSN: 1071-5819.
@article{choi_physiological_2015,
title = {Physiological evidence for a dual process model of the social effects of emotion in computers},
author = {Ahyoung Choi and Celso M. Melo and Peter Khooshabeh and Woontack Woo and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581914001414},
doi = {10.1016/j.ijhcs.2014.10.006},
issn = {1071-5819},
year = {2015},
date = {2015-02-01},
journal = {International Journal of Human-Computer Studies},
volume = {74},
pages = {41--53},
abstract = {There has been recent interest in the impact of emotional expressions of computers on people's decision making. However, despite a growing body of empirical work, the mechanism underlying such effects is still not clearly understood. To address this issue, the paper explores two kinds of processes studied by emotion theorists in human-human interaction: inferential processes, whereby people retrieve information from emotion expressions about others' beliefs, desires, and intentions; and affective processes, whereby emotion expressions evoke emotions in others, which then influence their decisions. To tease apart these two processes as they occur in human-computer interaction, we looked at physiological measures (electrodermal activity and heart rate deceleration). We present two experiments where participants engaged in social dilemmas with embodied agents that expressed emotion. Our results show, first, that people's decisions were influenced by affective and cognitive processes and that, according to the prevailing process, people behaved differently and formed contrasting subjective ratings of the agents; second, we show that an individual trait known as electrodermal lability, which measures people's physiological sensitivity, predicted the extent to which affective or inferential processes dominated the interaction. We discuss implications for the design of embodied agents and decision making systems that use emotion expression to enhance interaction between humans and computers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Corbin, Carina; Morbini, Fabrizio; Traum, David
Creating a Virtual Neighbor Inproceedings
In: Proceedings of International Workshop on Spoken Dialogue Systems, Busan, South Korea, 2015.
@inproceedings{corbin_creating_2015,
title = {Creating a Virtual Neighbor},
author = {Carina Corbin and Fabrizio Morbini and David Traum},
url = {http://ict.usc.edu/pubs/Creating%20a%20Virtual%20Neighbor.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of International Workshop on Spoken Dialogue Systems},
address = {Busan, South Korea},
abstract = {We present the first version of our Virtual Neighbor, who can talk with users about people employed in the same institution. The Virtual Neighbor can discuss information about employees in a medium-sized company or institute with users. The system acquires information from three sources: a personnel directory database, public web pages, and dialogue interaction. Users can interact through face-to-face spoken dialogue, using components from the ICT Virtual Human Toolkit, or via a chat interface.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Stratou, Giota; DeVault, David; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert "Skip"
SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications Inproceedings
In: Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI), Austin, Texas, 2015.
@inproceedings{morency_simsensei_2015,
title = {SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications},
author = {Louis-Philippe Morency and Giota Stratou and David DeVault and Arno Hartholt and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert "Skip" Rizzo},
url = {http://ict.usc.edu/pubs/SimSensei%20Demonstration%20A%20Perceptive%20Virtual%20Human%20Interviewer%20for%20Healthcare%20Applications.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI)},
address = {Austin, Texas},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. We emphasize the perception part of the system: a multimodal framework that captures and analyzes user state for both behavioral understanding and interactional purposes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; DeVault, David
Pair Me Up: A Web Framework for Crowd-Sourced Spoken Dialogue Collection Inproceedings
In: Proceedings of IWSDS 2015, pp. 1–12, Busan, South Korea, 2015.
@inproceedings{manuvinakurike_pair_2015,
title = {Pair Me Up: A Web Framework for Crowd-Sourced Spoken Dialogue Collection},
author = {Ramesh Manuvinakurike and David DeVault},
url = {http://ict.usc.edu/pubs/Pair%20Me%20Up-%20A%20Web%20Framework%20for%20Crowd-Sourced%20Spoken%20Dialogue%20Collection.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of IWSDS 2015},
pages = {1--12},
address = {Busan, South Korea},
abstract = {We describe and analyze a new web-based spoken dialogue data collection framework. The framework enables the capture of conversational speech from two remote users who converse with each other and play a dialogue game entirely through their web browsers. We report on the substantial improvements in the speed and cost of data capture we have observed with this crowd-sourced paradigm. We also analyze a range of data quality factors by comparing a crowd-sourced data set involving 196 remote users to a smaller but more quality-controlled lab-based data set. We focus our comparison on aspects that are especially important in our spoken dialogue research, including audio quality, the effect of communication latency on the interaction, our ability to synchronize the collected data, our ability to collect examples of excellent game play, and the naturalness of the resulting interactions. This analysis illustrates some of the current trade-offs between lab-based and crowd-sourced spoken dialogue data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Scherer, Stefan; Gratch, Jonathan; Carnevale, Peter; Morency, Louis-Philippe
I Can Already Guess Your Answer: Predicting Respondent Reactions During Dyadic Negotiation Journal Article
In: IEEE Transactions on Affective Computing, vol. 6, no. 2, pp. 86–96, 2015, ISSN: 1949-3045.
@article{park_i_2015,
title = {I Can Already Guess Your Answer: Predicting Respondent Reactions During Dyadic Negotiation},
author = {Sunghyun Park and Stefan Scherer and Jonathan Gratch and Peter Carnevale and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7024926},
doi = {10.1109/TAFFC.2015.2396079},
issn = {1949-3045},
year = {2015},
date = {2015-01-01},
journal = {IEEE Transactions on Affective Computing},
volume = {6},
number = {2},
pages = {86--96},
abstract = {Negotiation is a component deeply ingrained in our daily lives, and it can be challenging for a person to predict the respondent's reaction (acceptance or rejection) to a negotiation offer. In this work, we focus on finding acoustic and visual behavioral cues that are predictive of the respondent's immediate reactions using a face-to-face negotiation dataset, which consists of 42 dyadic interactions in a simulated negotiation setting. We explore four different sources of information: the nonverbal behavior of the proposer, that of the respondent, mutual behavior between the interactants related to behavioral symmetry and asymmetry, and the past negotiation history between the interactants. Firstly, we show that sources of information other than the nonverbal behavior of the respondent can achieve comparable performance in predicting respondent reactions. Secondly, we show that automatically extracted mutual behavioral cues of symmetry and asymmetry are predictive partly because they capture information about the nature of the interaction itself, whether cooperative or competitive. Lastly, we identify the audio-visual behavioral cues that are most predictive of the respondent's immediate reactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Marsella, Stacy; Gratch, Jonathan
Computationally Modeling Human Emotion Journal Article
In: Communications of the ACM, vol. 57, no. 12, pp. 56–67, 2014.
@article{marsella_computationally_2014,
title = {Computationally Modeling Human Emotion},
author = {Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2631912},
doi = {10.1145/2631912},
year = {2014},
date = {2014-12-01},
journal = {Communications of the ACM},
volume = {57},
number = {12},
pages = {56--67},
abstract = {Emotion's role in human behavior is an old debate that has become increasingly relevant to the computational sciences. Two-and-a-half millennia ago, Aristotle espoused a view of emotion at times remarkably similar to modern psychological theories, arguing that emotions (such as anger), in moderation, play a useful role, especially in interactions with others. Those who express anger at appropriate times are praiseworthy, while those lacking in anger at appropriate times are treated as fools. The Stoics took a different view; four centuries after Aristotle, Seneca considered emotions (such as anger) a threat to reason, arguing, “reason … is only powerful so long as it remains isolated from emotions.” In the 18th century, David Hume radically departed from the Stoic perspective, arguing for the key motivating role of emotions, saying, “Reason is, and ought only to be the slave of the passions.” A similar dichotomy of views can be seen in the history of artificial intelligence (AI) and agent research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jon
Virtual Humans for Interpersonal Processes and Skills Training Journal Article
In: AI Matters, vol. 1, no. 2, pp. 24–25, 2014, ISSN: 2372-3483.
@article{gratch_virtual_2014,
title = {Virtual Humans for Interpersonal Processes and Skills Training},
author = {Jon Gratch},
url = {http://dl.acm.org/citation.cfm?doid=2685328.2685336},
doi = {10.1145/2685328.2685336},
issn = {2372-3483},
year = {2014},
date = {2014-12-01},
journal = {AI Matters},
volume = {1},
number = {2},
pages = {24--25},
abstract = {Ellie is an interactive virtual human that performs mental health screens via natural language. This kiosk-based system is aimed at clients resistant to seeking traditional care. Research shows that, when such virtual humans interview people about their mental health, they are able to reduce impression management and fear of negative evaluation compared to interviews conducted with a human present. As both impression management and fear of negative evaluation inhibit people from opening up and disclosing personal information, this research also finds that people are more willing to disclose personal information to a virtual human interviewer than human interviewers. These results suggest that automated virtual humans can help overcome significant barriers to obtaining truthful client information during clinical interviews.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ron; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent Inproceedings
In: Proceedings of ICDVRAT 2014, International Journal of Disability and Human Development, Gothenburg, Sweden, 2014.
@inproceedings{rizzo_detection_2014,
title = {Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent},
author = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ron Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Detection%20and%20Computational%20Analysis%20of%20Psychological%20Signals%20Using%20a%20Virtual%20Human%20Interviewing%20Agent.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of ICDVRAT 2014},
publisher = {International Journal of Disability and Human Development},
address = {Gothenburg, Sweden},
abstract = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low-cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded “Detection and Computational Analysis of Psychological Signals” project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Calvo, Rafael A.; D'Mello, Sidney; Gratch, Jonathan; Kappas, Arvid (Ed.)
The Oxford Handbook of Affective Computing Book
Oxford University Press, Oxford; New York, 2014, ISBN: 978-0-19-994223-7.
@book{calvo_oxford_2014,
title = {The Oxford Handbook of Affective Computing},
editor = {Rafael A. Calvo and Sidney D'Mello and Jonathan Gratch and Arvid Kappas},
url = {https://global.oup.com/academic/product/the-oxford-handbook-of-affective-computing-9780199942237?cc=us&lang=en&},
isbn = {978-0-19-994223-7},
year = {2014},
date = {2014-12-01},
publisher = {Oxford University Press},
address = {Oxford; New York},
abstract = {The Oxford Handbook of Affective Computing aims to be the definitive reference for research in the burgeoning field of affective computing—a field that turns 18 at the time of writing. This introductory chapter is intended to convey the motivations of the editors and the content of the chapters in order to orient readers to the handbook. It begins with a very high-level overview of the field of affective computing, along with a bit of reminiscence about its formation, short history, and major accomplishments. The five main sections of the handbook—history and theory, detection, generation, methodologies, and applications—are then discussed, along with a bird's-eye view of the 41 chapters covered in the book. The introduction is devoted to short descriptions of the chapters featured in the handbook. A brief description of the Glossary concludes the introduction.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Venek, Verena; Scherer, Stefan; Morency, Louis-Philippe; Rizzo, Albert; Pestian, John
Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction: A Study of Verbal and Acoustic Behaviors Inproceedings
In: Spoken Language Technology Workshop (SLT), 2014 IEEE, pp. 277–282, IEEE, South Lake Tahoe, NV, 2014, ISBN: 978-1-4799-7129-9.
@inproceedings{venek_adolescent_2014,
title = {Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction: A Study of Verbal and Acoustic Behaviors},
author = {Verena Venek and Stefan Scherer and Louis-Philippe Morency and Albert Rizzo and John Pestian},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7078587},
doi = {10.1109/SLT.2014.7078587},
isbn = {978-1-4799-7129-9},
year = {2014},
date = {2014-12-01},
booktitle = {Spoken Language Technology Workshop (SLT), 2014 IEEE},
pages = {277--282},
publisher = {IEEE},
address = {South Lake Tahoe, NV},
abstract = {Suicide among adolescents is a major public health problem: it is the third leading cause of death in the US for ages 13-18. Up to now, there have been no objective ways to assess suicidal risk, i.e., whether a patient is non-suicidal, a suicidal re-attempter (i.e., repeater), or a suicidal non-repeater (i.e., an individual with one suicide attempt or showing signs of suicidal gestures or ideation). Therefore, features of the conversation, including verbal information and nonverbal acoustic information, were investigated in 60 audio-recorded interviews of 30 suicidal (13 repeaters and 17 non-repeaters) and 30 non-suicidal adolescents interviewed by a social worker. The interaction between clinician and patients was statistically analyzed to reveal differences between suicidal and non-suicidal adolescents and to investigate suicidal repeaters' behaviors in comparison to suicidal non-repeaters. By using a hierarchical ensemble classifier, we were able to successfully discriminate non-suicidal patients, suicidal repeaters, and suicidal non-repeaters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Chatterjee, Moitreya; Morency, Louis-Philippe
A Multimodal Context-based Approach for Distress Assessment Inproceedings
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 240–246, ACM Press, Istanbul, Turkey, 2014, ISBN: 978-1-4503-2885-2.
@inproceedings{ghosh_multimodal_2014,
title = {A Multimodal Context-based Approach for Distress Assessment},
author = {Sayan Ghosh and Moitreya Chatterjee and Louis-Philippe Morency},
url = {http://dl.acm.org/citation.cfm?doid=2663204.2663274},
doi = {10.1145/2663204.2663274},
isbn = {978-1-4503-2885-2},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
pages = {240--246},
publisher = {ACM Press},
address = {Istanbul, Turkey},
abstract = {The increasing prevalence of psychological distress disorders, such as depression and post-traumatic stress, necessitates a serious effort to create new tools and technologies to help with their diagnosis and treatment. In recent years, new computational approaches were proposed to objectively analyze patient non-verbal behaviors over the duration of the entire interaction between the patient and the clinician. In this paper, we go beyond non-verbal behaviors and propose a tri-modal approach which integrates verbal behaviors with acoustic and visual behaviors to analyze psychological distress during the course of the dyadic semi-structured interviews. Our approach exploits the advantages of the dyadic nature of these interactions to contextualize the participant responses based on the affective components (intimacy and polarity levels) of the questions. We validate our approach using one of the largest corpus of semi-structured interviews for distress assessment which consists of 154 multimodal dyadic interactions. Our results show significant improvement on distress prediction performance when integrating verbal behaviors with acoustic and visual behaviors. In addition, our analysis shows that contextualizing the responses improves the prediction performance, most significantly with positive and intimate questions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Shim, Han Suk; Chatterjee, Moitreya; Sagae, Kenji; Morency, Louis-Philippe
Computational Analysis of Persuasiveness in Social Multimedia: A Novel Dataset and Multimodal Prediction Approach Inproceedings
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 50–57, ACM Press, 2014, ISBN: 978-1-4503-2885-2.
@inproceedings{park_computational_2014,
title = {Computational Analysis of Persuasiveness in Social Multimedia: A Novel Dataset and Multimodal Prediction Approach},
author = {Sunghyun Park and Han Suk Shim and Moitreya Chatterjee and Kenji Sagae and Louis-Philippe Morency},
url = {http://dl.acm.org/citation.cfm?doid=2663204.2663260},
doi = {10.1145/2663204.2663260},
isbn = {978-1-4503-2885-2},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
pages = {50--57},
publisher = {ACM Press},
abstract = {Our lives are heavily influenced by persuasive communication, and it is essential in almost any type of social interaction, from business negotiation to conversation with our friends and family. With the rapid growth of social multimedia websites, it is becoming ever more important and useful to understand persuasiveness in the context of social multimedia content online. In this paper, we introduce our newly created multimedia corpus of 1,000 movie review videos obtained from a social multimedia website called ExpoTV.com, which will be made freely available to the research community. Our research results presented here revolve around three main research hypotheses. Firstly, we show that computational descriptors derived from verbal and nonverbal behavior can be predictive of persuasiveness. We further show that combining descriptors from multiple communication modalities (audio, text and visual) improves the prediction performance compared to using those from a single modality alone. Secondly, we investigate whether having prior knowledge of a speaker expressing a positive or negative opinion helps better predict the speaker's persuasiveness. Lastly, we show that it is possible to make comparable predictions of persuasiveness by only looking at thin slices (shorter time windows) of a speaker's behavior.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Hammal, Zakia; Yang, Ying; Morency, Louis-Philippe; Cohn, Jeffrey F.
Dyadic Behavior Analysis in Depression Severity Assessment Interviews Inproceedings
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 112–119, ACM Press, Istanbul, Turkey, 2014, ISBN: 978-1-4503-2885-2.
@inproceedings{scherer_dyadic_2014,
title = {Dyadic Behavior Analysis in Depression Severity Assessment Interviews},
author = {Stefan Scherer and Zakia Hammal and Ying Yang and Louis-Philippe Morency and Jeffrey F. Cohn},
url = {http://dl.acm.org/citation.cfm?doid=2663204.2663238},
doi = {10.1145/2663204.2663238},
isbn = {978-1-4503-2885-2},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
pages = {112--119},
publisher = {ACM Press},
address = {Istanbul, Turkey},
abstract = {Previous literature suggests that depression impacts the vocal timing of both participants and clinical interviewers but is mixed with respect to acoustic features. To investigate further, 57 middle-aged adults (men and women) with Major Depressive Disorder and their clinical interviewers (all women) were studied. Participants were interviewed for depression severity on up to four occasions over a 21-week period using the Hamilton Rating Scale for Depression (HRSD), which is a criterion measure for depression severity in clinical trials. Acoustic features were extracted for both participants and interviewers using the COVAREP toolbox. Missing data occurred due to missed appointments, technical problems, or insufficient vocal samples. Data from 36 participants and their interviewers met criteria and were included for analysis to compare between high and low depression severity. Acoustic features for participants varied between men and women as expected, but failed to vary with depression severity. For interviewers, acoustic characteristics strongly varied with the severity of the interviewee's depression. Accommodation - the tendency of interactants to adapt their communicative behavior to each other - between interviewers and interviewees was inversely related to depression severity. These findings suggest that interviewers modify their acoustic features in response to depression severity, and depression severity strongly impacts interpersonal accommodation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nouri, Elnaz
Training Agents by Crowds Inproceedings
In: Proceedings of HCOMP 2014, Pittsburgh, PA, 2014.
@inproceedings{nouri_training_2014,
title = {Training Agents by Crowds},
author = {Elnaz Nouri},
url = {http://ict.usc.edu/pubs/Training%20Agents%20by%20Crowds.pdf},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of HCOMP 2014},
address = {Pittsburgh, PA},
abstract = {On-line learning algorithms are particularly suitable for developing interactive computational agents. These algorithms can be used to teach agents the abilities needed for engaging in social interactions with humans. If humans are used as teachers in the context of on-line learning algorithms, a serious challenge arises: their lack of commitment and availability during the required extensive training. In this work we address this challenge by showing how "crowds of human workers" rather than "single users" can be recruited as teachers for training each learning agent. This paper proposes a framework for training agents by crowds. The focus of this proposal is narrowed by using Reinforcement Learning as the human guidance method for teaching agents how to engage in simple negotiation games (such as the Ultimatum Bargaining Game and the Dictator Game).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
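As a rough illustration of the training loop this proposal describes, the following minimal Python sketch implements a bandit-style reinforcement learner whose reward signal comes from whichever crowd worker happens to be available; the feedback function and the offer-based action set are hypothetical stand-ins, not the paper's actual setup.

import random
from collections import defaultdict

def train_from_crowd(get_crowd_reward, actions, episodes=1000, alpha=0.1, epsilon=0.2):
    """Bandit-style Q-learning in which each reward is one crowd judgment.

    get_crowd_reward(action) -> float stands in for a rating collected from
    whichever worker is currently available, so no single teacher has to
    stay for the whole training run.
    """
    q = defaultdict(float)
    for _ in range(episodes):
        # Epsilon-greedy: usually exploit the best-rated action, sometimes explore.
        if random.random() < epsilon:
            action = random.choice(actions)
        else:
            action = max(actions, key=lambda a: q[a])
        # Incremental update toward the latest crowd judgment.
        q[action] += alpha * (get_crowd_reward(action) - q[action])
    return q

# Hypothetical example: actions are Ultimatum Game offers (0-10 out of 10),
# and a simulated "crowd" rewards offers closer to an even split.
offers = list(range(11))
q_values = train_from_crowd(lambda a: 1.0 - abs(a - 5) / 5.0, offers)
print(max(offers, key=lambda a: q_values[a]))  # expected to converge near 5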
Trentin, Edmondo; Scherer, Stefan; Schwenker, Friedhelm
Emotion recognition from speech signals via a probabilistic echo-state network Journal Article
In: Pattern Recognition Letters, vol. 66, pp. 4–12, 2014.
@article{trentin_emotion_2014,
title = {Emotion recognition from speech signals via a probabilistic echo-state network},
author = {Edmondo Trentin and Stefan Scherer and Friedhelm Schwenker},
url = {http://www.sciencedirect.com/science/article/pii/S0167865514003328},
doi = {10.1016/j.patrec.2014.10.015},
year = {2014},
date = {2014-11-01},
journal = {Pattern Recognition Letters},
volume = {66},
pages = {4--12},
abstract = {The paper presents a probabilistic echo-state network (π-ESN) for density estimation over variable-length sequences of multivariate random vectors. The π-ESN stems from the combination of the reservoir of an ESN and a parametric density model based on radial basis functions. A constrained maximum likelihood training algorithm is introduced, suitable for sequence classification. Extensions of the algorithm to unsupervised clustering and semi-supervised learning (SSL) of sequences are proposed. Experiments in emotion recognition from speech signals are conducted on the WaSeP© dataset. Compared with established techniques, the π-ESN yields the highest recognition accuracies, and shows interesting clustering and SSL capabilities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
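For readers unfamiliar with echo-state networks, the reservoir computation at the core of this model is compact enough to sketch in a few lines of Python. This is a minimal illustration of the standard ESN state update only; the π-ESN's radial-basis-function density layer and constrained maximum-likelihood training are not reproduced, and all dimensions and scalings here are assumed values.

import numpy as np

def run_reservoir(inputs, n_reservoir=100, spectral_radius=0.9, seed=0):
    """Drive a fixed random reservoir with an input sequence.

    inputs: array of shape (T, n_in). Returns reservoir states (T, n_reservoir),
    which a readout layer or, in the pi-ESN, a density model would consume.
    """
    rng = np.random.default_rng(seed)
    n_in = inputs.shape[1]
    w_in = rng.uniform(-0.5, 0.5, size=(n_reservoir, n_in))
    w_res = rng.uniform(-0.5, 0.5, size=(n_reservoir, n_reservoir))
    # Rescale recurrent weights so their spectral radius is below 1,
    # a common recipe for obtaining the echo-state property.
    w_res *= spectral_radius / max(abs(np.linalg.eigvals(w_res)))
    states = np.zeros((len(inputs), n_reservoir))
    x = np.zeros(n_reservoir)
    for t, u in enumerate(inputs):
        x = np.tanh(w_in @ u + w_res @ x)  # standard leakless ESN update
        states[t] = x
    return states

# Example: a random 50-step sequence of 3-dimensional feature vectors.
states = run_reservoir(np.random.default_rng(1).normal(size=(50, 3)))
print(states.shape)  # (50, 100)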
Feng, Andrew; Lucas, Gale; Marsella, Stacy; Suma, Evan; Chiu, Chung-Cheng; Casas, Dan; Shapiro, Ari
Acting the Part: The Role of Gesture on Avatar Identity Inproceedings
In: Proceedings of the Seventh International Conference on Motion in Games (MIG 2014), pp. 49–54, ACM Press, Playa Vista, CA, 2014, ISBN: 978-1-4503-2623-0.
@inproceedings{feng_acting_2014,
title = {Acting the Part: The Role of Gesture on Avatar Identity},
author = {Andrew Feng and Gale Lucas and Stacy Marsella and Evan Suma and Chung-Cheng Chiu and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2668064.2668102},
doi = {10.1145/2668064.2668102},
isbn = {978-1-4503-2623-0},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the Seventh International Conference on Motion in Games (MIG 2014)},
pages = {49--54},
publisher = {ACM Press},
address = {Playa Vista, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. However, in order to generate a recognizable 3D avatar, the movement and behavior of the human subject should be captured and replicated as well. We present a method of generating a 3D model from a scan, as well as a method to incorporate a subject's style of gesturing into a 3D character. We present a study showing that 3D characters that used the gestural style of their original human subjects were more recognizable as the original subject than those that did not.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nouri, Elnaz; Georgila, Kallirroi; Traum, David
Culture-specific models of negotiation for virtual characters: multi-attribute decision-making based on culture-specific values Journal Article
In: AI & Society, 2014, ISSN: 0951-5666, 1435-5655.
@article{nouri_culture-specific_2014,
title = {Culture-specific models of negotiation for virtual characters: multi-attribute decision-making based on culture-specific values},
author = {Elnaz Nouri and Kallirroi Georgila and David Traum},
url = {http://link.springer.com/10.1007/s00146-014-0570-7},
doi = {10.1007/s00146-014-0570-7},
issn = {0951-5666, 1435-5655},
year = {2014},
date = {2014-10-01},
journal = {AI & Society},
abstract = {We posit that observed differences in negotiation performance across cultures can be explained by participants trying to optimize across multiple values, where the relative importance of values differs across cultures. We look at two ways for specifying weights on values for different cultures: one in which the weights of the model are hand-crafted, based on intuition interpreting Hofstede dimensions for the cultures, and one in which the weights of the model are learned from data using Inverse Reinforcement Learning (IRL). We apply this model to the Ultimatum Game and integrate it into a virtual human dialogue system. We show that weights learned from IRL surpass both a weak baseline with random weights, and a strong baseline considering only one factor of maximizing gain in own wealth in accounting for the behavior of human players from four different cultures. We also show that the weights learned with our model for one culture outperform weights learned for other cultures when playing against opponents of the first culture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
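The multi-attribute decision making described here reduces, at its simplest, to scoring each candidate offer as a weighted sum of culture-sensitive values. The sketch below illustrates the idea for the Ultimatum Game with two hypothetical values and hand-picked weights; the paper's actual value set and the weights learned via Inverse Reinforcement Learning are not reproduced here.

def offer_utility(own_share, other_share, weights):
    """Score an Ultimatum Game offer as a weighted sum of value functions.

    Two illustrative values only: self-interest (own normalized gain) and
    fairness (closeness of the two shares). A real model would include more.
    """
    fairness = 1.0 - abs(own_share - other_share)
    return weights["self_interest"] * own_share + weights["fairness"] * fairness

def best_proposal(total=10, weights=None):
    # Enumerate integer splits of the pie and keep the highest-utility one.
    splits = [(kept, total - kept) for kept in range(total + 1)]
    return max(splits, key=lambda s: offer_utility(s[0] / total, s[1] / total, weights))

# Hypothetical weightings: a fairness-heavy weighting proposes an even split,
# while a purely self-interested one keeps everything.
print(best_proposal(weights={"self_interest": 0.4, "fairness": 0.6}))  # (5, 5)
print(best_proposal(weights={"self_interest": 1.0, "fairness": 0.0}))  # (10, 0)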
Scherer, Stefan; Stratou, Giota; Lucas, Gale; Mahmoud, Marwa; Boberg, Jill; Gratch, Jonathan; Rizzo, Albert (Skip); Morency, Louis-Philippe
Automatic audiovisual behavior descriptors for psychological disorder analysis Journal Article
In: Image and Vision Computing Journal, vol. 32, no. 10, pp. 648–658, 2014, ISSN: 0262-8856.
@article{scherer_automatic_2014,
title = {Automatic audiovisual behavior descriptors for psychological disorder analysis},
author = {Stefan Scherer and Giota Stratou and Gale Lucas and Marwa Mahmoud and Jill Boberg and Jonathan Gratch and Albert (Skip) Rizzo and Louis-Philippe Morency},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0262885614001000},
doi = {10.1016/j.imavis.2014.06.001},
issn = {0262-8856},
year = {2014},
date = {2014-10-01},
journal = {Image and Vision Computing Journal},
volume = {32},
number = {10},
pages = {648--658},
abstract = {We investigate the capabilities of automatic audiovisual nonverbal behavior descriptors to identify indicators of psychological disorders such as depression, anxiety, and post-traumatic stress disorder. Due to strong correlations between these disorders, as measured with standard self-assessment questionnaires in this study, we focus our investigations in particular on a generic distress measure, as identified using factor analysis. Within this work, we seek to confirm and enrich the present state of the art, predominantly based on qualitative manual annotations, with automatic quantitative behavior descriptors. We propose a number of nonverbal behavior descriptors that can be automatically estimated from audiovisual signals. Such automatic behavior descriptors could be used to support healthcare providers with quantified and objective observations that could ultimately improve clinical assessment. We evaluate our work on the dataset called the Distress Assessment Interview Corpus (DAIC), which comprises dyadic interactions between a confederate interviewer and a paid participant. Our evaluation on this dataset shows correlation of our automatic behavior descriptors with the derived general distress measure. Our analysis also includes a deeper study of self-adaptor and fidgeting behaviors based on detailed annotations of where these behaviors occur.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pincus, Eli; DeVault, David; Traum, David
Mr. Clue - A Virtual Agent that can Play Word-Guessing Games Inproceedings
In: Proceedings of the 3rd Workshop on Games and NLP (GAMNLP-14), Raleigh, NC, 2014.
@inproceedings{pincus_mr_2014,
title = {Mr. Clue - A Virtual Agent that can Play Word-Guessing Games},
author = {Eli Pincus and David DeVault and David Traum},
url = {http://ict.usc.edu/pubs/Mr.%20Clue%20-%20A%20Virtual%20Agent%20that%20can%20Play%20Word-Guessing%20Games.pdf},
year = {2014},
date = {2014-10-01},
booktitle = {Proceedings of the 3rd Workshop on Games and NLP (GAMNLP-14)},
address = {Raleigh, NC},
abstract = {This demonstration showcases a virtual agent, Mr. Clue, capable of acting in the role of clue-giver in a word-guessing game. The agent has the ability to automatically generate clues and update its dialogue policy dynamically based on user input.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nazarian, Angela; Nouri, Elnaz; Traum, David
Initiative Patterns in Dialogue Genres Inproceedings
In: Proceedings of Semdial 2014, Edinburgh, UK, 2014.
@inproceedings{nazarian_initiative_2014,
title = {Initiative Patterns in Dialogue Genres},
author = {Angela Nazarian and Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Initiative%20Patterns%20in%20Dialogue%20Genres.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of Semdial 2014},
address = {Edinburgh, UK},
abstract = {One of the ways of distinguishing different dialogue genres is the differences in patterns of interaction between the participants. Morbini et al. (2013) informally define dialogue genres on the basis of features like user vs. system initiative, amongst other criteria. In this paper, we apply the multi-label initiative annotation scheme and related features from (Nouri and Traum, 2014) to a set of dialogue corpora from different domains. In our initial study, we examine two question-answering domains, a “slot-filling” service application domain, and several human-human negotiation domains.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2003
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Inproceedings
In: International Workshop on Computational Semantics, 2003.
@inproceedings{traum_semantics_2003,
title = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Workshop on Computational Semantics},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
@inproceedings{marsella_modeling_2003,
title = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {313--320},
address = {Melbourne, Australia},
abstract = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
@inproceedings{traum_negotiation_2003,
title = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
author = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {441--448},
address = {Melbourne, Australia},
abstract = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem (Extended Version) Technical Report
University of Southern California Institute for Creative Technologies, no. ICT TR 02 2003, 2003.
@techreport{mao_social_2003,
title = {The Social Credit Assignment Problem (Extended Version)},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT%20TR%2002%202003.pdf},
year = {2003},
date = {2003-01-01},
number = {ICT TR 02 2003},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Inproceedings
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
@inproceedings{gratch_automating_2003,
title = {Automating After Action Review: Attributing Blame or Credit in Team Training},
author = {Jonathan Gratch and Wenji Mao},
url = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
address = {Scottsdale, AZ},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Inproceedings
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
@inproceedings{mao_social_2003-1,
title = {The Social Credit Assignment Problem},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
volume = {2792},
address = {Kloster Irsee, Germany},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81--127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2002
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
@inproceedings{traum_embodied_2002,
title = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
author = {David Traum and Jeff Rickel},
url = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Inproceedings
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
@inproceedings{marsella_modeling_2002,
title = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
year = {2002},
date = {2002-06-01},
booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
address = {Orlando, FL},
abstract = {Recognizing and managing emotion in oneself and in those under ones command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
@inproceedings{hill_anticipating_2002,
title = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
author = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {821--827},
address = {Bologna, Italy},
abstract = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
@article{rickel_toward_2002,
title = {Toward a New Generation of Virtual Humans for Interactive Experiences},
author = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
url = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
@inproceedings{marsella_step_2002,
title = {A step toward irrationality: using emotion to change belief},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {334--341},
address = {Bologna, Italy},
abstract = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Details of the CFOR Planner Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 01.2002, 2002.
@techreport{gratch_details_2002,
title = {Details of the CFOR Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Details%20of%20the%20CFOR%20Planner.pdf},
year = {2002},
date = {2002-01-01},
number = {ICT TR 01.2002},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
@article{gratch_creating_2002,
title = {Creating Interactive Virtual Humans: Some Assembly Required},
author = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
url = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
pages = {54--63},
abstract = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed with, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Inproceedings
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
@inproceedings{traum_ideas_2002,
title = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2001
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, Hollywood, and multiprocessing operating systems can live happily ever after Inproceedings
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
@inproceedings{gratch_adaptive_2001,
title = {Adaptive narrative: How autonomous agents, Hollywood, and multiprocessing operating systems can live happily ever after},
author = {Jonathan Gratch and Jay Douglas},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
doi = {10.1007/3-540-45420-9_12},
isbn = {3-540-42611-6},
year = {2001},
date = {2001-10-01},
booktitle = {Proceedings of International Conference on Virtual Storytelling},
pages = {100--112},
address = {Avignon, France},
series = {LNCS},
abstract = {Interactive Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN planning provides a strong basis for narrative coherence in the context of interactivity, HSP offers additional flexibility in the generation of stories and in the mechanisms for generating comic situations.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Inproceedings
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
title = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
author = {Jay Douglas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Inproceedings
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
title = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
author = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
url = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of real-world scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
keywords = {Social Simulation, Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Inproceedings
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Inproceedings
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
title = {Modeling Emotions in the Mission Rehearsal Exercise},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
year = {2001},
date = {2001-05-01},
booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
pages = {457--466},
address = {Orlando, FL},
abstract = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Inproceedings
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem-solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated in this environment, the conflict elaboration, and the problem-solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Ess-Dykema, Carol Van; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Inproceedings
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
author = {Mari Olsen and David Traum and Carol Van Ess-Dykema and Amy Weinberg},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization, with a best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Inproceedings
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_tears_2001,
title = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {278--285},
address = {Montreal, Canada},
abstract = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al.'s IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision-making skills in highly evocative situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Inproceedings
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{damiano_anticipatory_2001,
title = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
author = {Rossana Damiano and David Traum},
url = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Inproceedings
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2001,
title = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
address = {Edinburgh, Scotland},
abstract = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2000
Gratch, Jonathan
Human-like behavior, alas, demands human-like intellect Inproceedings
In: Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents, Barcelona, Spain, 2000.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_human-like_2000,
title = {Human-like behavior, alas, demands human-like intellect},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Human-like%20behavior%20alas%20demands%20human-like%20intellect.pdf},
year = {2000},
date = {2000-06-01},
booktitle = {Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents},
address = {Barcelona, Spain},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Gratch, Jonathan; Rosenbloom, Paul
Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces Inproceedings
In: Proceedings of the 4th International Conference on Autonomous Agents, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Social Simulation, Virtual Humans
@inproceedings{hill_flexible_2000,
title = {Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces},
author = {Randall W. Hill and Jonathan Gratch and Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Flexible%20Group%20Behavior-%20Virtual%20Commanders%20for%20Synthetic%20Battlespaces.pdf},
year = {2000},
date = {2000-06-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
address = {Barcelona, Spain},
abstract = {This paper describes a project to develop autonomous commander agents for synthetic battlespaces. The commander agents plan missions, monitor their execution, and replan when necessary. To reason about the social aspects of group behavior, the commanders take various social stances that enable them to collaborate with friends, exercise or defer to authority, and thwart their foes. The purpose of this paper is to describe these capabilities and how they came to be through a series of lessons learned while developing autonomous agents for this domain.},
keywords = {CogArch, Cognitive Architecture, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Inproceedings
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_how_2000,
title = {How Long Can an Agent Look Away From a Target?},
author = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
year = {2000},
date = {2000-05-01},
booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no one theory for SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot, who visually tracks the location and direction of several vehicles that he cannot see simultaneously, must shift his visual field of view to scan the environment and to sense the situation involved. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. In SA, it is important for a virtual pilot with a limited visual field of view to gather information from the environment and to choose appropriate actions without losing the target.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Modeling the Interplay Between Emotion and Decision-Making Inproceedings
In: Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_modeling_2000,
title = {Modeling the Interplay Between Emotion and Decision-Making},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20Between%20Emotion%20and%20Decision-Making.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Current models of computer-generated forces are limited by their inability to model many of the moderators that influence the performance of real troops in the field, such as the effects of stress, emotion, and individual differences. This article discusses an extension to our command and control modeling architecture that begins to address how behavioral moderators influence the command decision-making process. Our Soar-Cfor command architecture was developed under the STOW and ASTT programs to support distributed command and control decision-making in the domain of army aviation planning. We have recently extended this architecture to model how people appraise the emotional significance of events and how these events influence decision making.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Socially Situated Planning Incollection
In: Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations, vol. 3, pp. 181–188, AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{gratch_socially_2000,
title = {Socially Situated Planning},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Socially%20Situated%20Planning.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations},
volume = {3},
pages = {181--188},
address = {AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA},
abstract = {Introduction: Virtual environments such as training simulators and video games do an impressive job at modeling the physical dynamics of synthetic worlds but fall short when modeling the social dynamics of anything but the most impoverished human encounters. Yet the social dimension is at least as important as good graphics for creating an engaging game or effective training tool. Commercial flight simulators accurately model the technical aspects of flight but many aviation disasters arise from social breakdowns: poor management skills in the cockpit, or the effects of stress and emotion. Perhaps the biggest consumer of simulation technology, the U.S. military, identifies unrealistic human and organizational behavior as a major limitation of existing simulation technology (NRC, 1998). And of course the entertainment industry has long recognized the importance of good character, emotional attachment and rich social interactions to "put butts in seats." This article describes a research effort to endow virtual training environments with richer models of social behavior. We have been developing autonomous and semi-autonomous software agents that plan and act while situated in a social network of other entities, human and synthetic (Hill et al., 1997; Tambe, 1997; Gratch and Hill, 1999). My work has focused on making agents act in an organization and obey social constraints, coordinate their behavior, negotiate conflicts, but also obey their own self-interest and show a range of individual differences in their behavior and willingness to violate social norms, albeit within the relatively narrow context of a specific training exercise.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gratch, Jonathan
Émile: Marshalling Passions in Training and Education Inproceedings
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_emile_2000,
title = {Émile: Marshalling Passions in Training and Education},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
pages = {325--332},
address = {Barcelona, Spain},
abstract = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes Émile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
1999
Gratch, Jonathan; Marsella, Stacy C.; Hill, Randall W.; Stone, LTC George III
Deriving Priority Intelligence Requirements for Synthetic Command Entities Inproceedings
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_deriving_1999,
title = {Deriving Priority Intelligence Requirements for Synthetic Command Entities},
author = {Jonathan Gratch and Stacy C. Marsella and Randall W. Hill and LTC George Stone III},
url = {http://ict.usc.edu/pubs/Deriving%20Priority%20Intelligence%20Requirements%20for%20Synthetic%20Command%20Entities.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {Simulation-based training is using increasingly complex synthetic forces. As more complex multi-echelon synthetic forces are employed in simulations, the need for a realistic model of their command and control behavior becomes more urgent. In this paper we discuss one key component of such a model, the autonomous generation and use of priority intelligence requirements within multi-echelon plans.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Randall W.
Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces Inproceedings
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_continuous_1999,
title = {Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces},
author = {Jonathan Gratch and Randall W. Hill},
url = {http://ict.usc.edu/pubs/Continuous%20Planning%20and%20Collaboration%20for%20Command%20and%20Control%20in%20Joint%20Synthetic%20Battlespaces.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {In this paper we describe our efforts to model command and control entities for Joint Synthetic Battlespaces. Command agents require a broader repertoire of capabilities than is typically modeled in simulation. They must develop mission plans involving multiple subordinate units, monitor execution, dynamically modify mission plans in response to situational contingencies, collaborate with other decision makers, and deal with a host of organizational issues. We describe our approach to command agent modeling that addresses a number of these issues through its continuous and collaborative approach to mission planning.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Andersen, Carl F.; Chong, Waiyian; Josyula, Darsana; Okamoto, Yoshi; Purang, Khemdut; O'Donovan-Anderson, Michael; Perlis, Don
Representations of Dialogue State for Domain and Task Independent Meta-Dialogue Journal Article
In: Electronic Transactions on Artificial Intelligence, vol. 3, pp. 125–152, 1999.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{traum_representations_1999,
title = {Representations of Dialogue State for Domain and Task Independent Meta-Dialogue},
author = {David Traum and Carl F. Andersen and Waiyian Chong and Darsana Josyula and Yoshi Okamoto and Khemdut Purang and Michael O'Donovan-Anderson and Don Perlis},
url = {http://ict.usc.edu/pubs/Representations%20of%20Dialogue%20State%20for%20Domain%20and%20Task%20Independent%20Meta-Dialogue.pdf},
year = {1999},
date = {1999-01-01},
journal = {Electronic Transactions on Artificial Intelligence},
volume = {3},
pages = {125--152},
abstract = {We propose a representation of local dialogue context motivated by the need to react appropriately to meta-dialogue, such as various sorts of corrections to the sequence of an instruction and response action. Such contexts include at least the following aspects: the words and linguistic structures uttered, the domain correlates of those linguistic structures, and plans and actions in response. Each of these is needed as part of the context in order to be able to correctly interpret the range of possible corrections. Partitioning knowledge of dialogue structure in this way may lead to an ability to represent generic dialogue structure (e.g., in the form of axioms), which can be particularized to the domain, topic and content of the dialogue.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
Why You Should Buy an Emotional Planner Inproceedings
In: Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures, 1999.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_why_1999,
title = {Why You Should Buy an Emotional Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Why%20You%20Should%20Buy%20an%20Emotional%20Planner.pdf},
year = {1999},
date = {1999-01-01},
booktitle = {Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures},
abstract = {Computational models of emotion have begun to address the problem of how agents arrive at a given emotional state, and how that state might alter their reactions to the environment. Existing work has focused on reactive models of behavior and does not, as of yet, provide much insight on how emotion might relate to the construction and execution of complex plans. This article focuses on this latter question. I present a model of how agents appraise the emotional significance of events that illustrates a complementary relationship between classical planning methods and models of emotion processing. By building on classical planning methods, the model clarifies prior accounts of emotional appraisal and extends these accounts to handle the generation and execution of complex multi-agent plans.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
0000
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
Abstract | BibTeX | Tags: DTIC, MedVR, Virtual Humans, VR
@article{hartholt_combat_nodate,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Arno Hartholt and Sharon Mozgai},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
keywords = {DTIC, MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing Journal Article
In: pp. 9, 0000.
BibTeX | Tags: Emotions, Virtual Humans
@article{gratch_emotion_nodate,
title = {Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing},
author = {Jonathan Gratch},
pages = {9},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, UARC, Virtual Humans
@article{gervits_classication-based_nodate,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {ARL, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}