Publications
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Study comparing video-based characters and 3D-based characters on mobile devices for chat Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 181–186, ACM Press, Burlingame, California, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{kang_study_2016,
title = {Study comparing video-based characters and 3D-based characters on mobile devices for chat},
author = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2994274},
doi = {10.1145/2994258.2994274},
isbn = {978-1-4503-4592-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 9th International Conference on Motion in Games},
pages = {181–186},
publisher = {ACM Press},
address = {Burlingame, California},
abstract = {This study explores presentation techniques for a chat-based virtual human that communicates engagingly with users. Interactions with the virtual human occur via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated virtual character as opposed to a real human video character capable of displaying realistic backchannel behaviors. An audio-only interface is additionally compared with the two types of characters. The findings of our study suggest that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits realistic backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Smart Mobile Virtual Characters: Video Characters vs. Animated Characters Proceedings Article
In: Proceedings of the Fourth International Conference on Human Agent Interaction, pp. 371–374, ACM Press, Biopolis, Singapore, 2016, ISBN: 978-1-4503-4508-8.
@inproceedings{kang_smart_2016,
title = {Smart Mobile Virtual Characters: Video Characters vs. Animated Characters},
author = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2980511},
doi = {10.1145/2974804.2980511},
isbn = {978-1-4503-4508-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the Fourth International Conference on Human Agent Interaction},
pages = {371–374},
publisher = {ACM Press},
address = {Biopolis, Singapore},
abstract = {This study investigates presentation techniques for a chat-based virtual human that communicates engagingly with users via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated 3D virtual character as opposed to a real human video character capable of displaying backchannel behaviors. The findings of our study demonstrate that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Chandrashekhar, Nithin; Shapiro, Ari; Morency, Louis-Philippe; Scherer, Stefan
Manipulating the Perception of Virtual Audiences using Crowdsourced Behaviors Proceedings Article
In: Proceedings of the IVA 2016: Intelligent Virtual Agents Conference, Springer, Los Angeles, CA, 2016.
@inproceedings{chollet_manipulating_2016,
title = {Manipulating the Perception of Virtual Audiences using Crowdsourced Behaviors},
author = {Mathieu Chollet and Nithin Chandrashekhar and Ari Shapiro and Louis-Philippe Morency and Stefan Scherer},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110162.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the IVA 2016: Intelligent Virtual Agents Conference},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {Virtual audiences are used for training public speaking and mitigating anxiety related to it. However, research has been scarce on studying how virtual audiences are perceived and which non-verbal behaviors should be used to make such an audience appear in particular states, such as boredom or engagement. Recently, crowdsourcing methods have been proposed for collecting data for building virtual agents' behavior models. In this paper, we use crowdsourcing for creating and evaluating a nonverbal behaviors generation model for virtual audiences. We show that our model successfully expresses relevant audience states (i.e. low to high arousal, negative to positive valence), and that the overall impression exhibited by the virtual audience can be controlled by manipulating the number of individual audience members that display a congruent state.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bresnahan, T.; Rizzo, A.; Burke, S. L.; Partin, M.; Ahlness, R. M.; Trimmer, M.
Using Virtual Interactive Training Agents (VITA) with Adults with Autism and other Developmental Disabilities Proceedings Article
In: Proceedings of the 2016 International Conference on Disability, Virtual Reality, and Associated Technology, pp. 49–56, ICDVRAT and the University of Reading, Los Angeles, CA, 2016, ISBN: 978-0-7049-1547-3.
@inproceedings{bresnahan_using_2016,
title = {Using Virtual Interactive Training Agents (VITA) with Adults with Autism and other Developmental Disabilities},
author = {T. Bresnahan and A. Rizzo and S. L. Burke and M. Partin and R. M. Ahlness and M. Trimmer},
url = {http://www.icdvrat.org/2016/papers/ICDVRAT2016_S02N2_Bresnahan_etal.pdf},
isbn = {978-0-7049-1547-3},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 International Conference on Disability, Virtual Reality, and Associated Technology},
pages = {49–56},
publisher = {ICDVRAT and the University of Reading},
address = {Los Angeles, CA},
abstract = {Conversational Virtual Human (VH) agents are increasingly being used to support role-play experiential learning across a range of use-cases and populations. This project examined whether use of the Virtual Interactive Training Agent (VITA) system would improve job interviewing skills in a sample of persons with autism or other developmental disability. The study examined performance differences between baseline and final interviews in face-to-face and virtual reality conditions, and whether statistically significant increases were demonstrated between interviewing conditions. Paired samples t-tests were utilized to examine mean changes in performance by interview stage and in the overall difference between baseline and final interview stages. The preliminary results indicated that VITA is a positive factor when preparing young adults with autism or other developmental disability for employment interviews. Statistically significant results were demonstrated across all pilot conditions and in all but one post-assessment condition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark; Neubauer, Cathy; Passaro, Tony; Harrison, Andre; Scherer, Stefan; Khooshabeh, Pete
Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop, Los Angeles, CA, 2016.
@inproceedings{dennison_using_2016,
title = {Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task},
author = {Mark Dennison and Cathy Neubauer and Tony Passaro and Andre Harrison and Stefan Scherer and Pete Khooshabeh},
url = {http://marksdennison.com/s/DennisonPAVA2016.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop},
address = {Los Angeles, CA},
abstract = {Teams of two individuals worked together in a high-intensity simulated bomb defusing task. Half the teams were given icebreaker social time to increase comfort and familiarity with each other and the remaining half of the teams served as controls and did not meet until the task began. Electrocardiography and impedance cardiography were recorded to examine cardiac changes during task cooperation. Changes in ventricular contractility showed that individuals who had taken part in the icebreaker showed increased task engagement over time whereas controls showed the opposite. Data also trended to show that icebreaker participants were in a challenge state and controls were in a threat state during the final thirty seconds of bomb defusal. Finally, we show that a set of cardiac features can be used to classify participant data as belonging to the icebreaker or control groups with an accuracy as high as 88%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahn, Emily; Morbini, Fabrizio; Gordon, Andrew S.
Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing Proceedings Article
In: Proceedings of the 9th International Natural Language Generation Conference (INLG-2016), Edinburgh, UK, 2016.
@inproceedings{ahn_improving_2016,
title = {Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing},
author = {Emily Ahn and Fabrizio Morbini and Andrew S. Gordon},
url = {https://www.researchgate.net/publication/307512031_Improving_Fluency_in_Narrative_Text_Generation_With_Grammatical_Transformations_and_Probabilistic_Parsing},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 9th International Natural Language Generation Conference (INLG-2016)},
address = {Edinburgh, UK},
abstract = {In research on automatic generation of narrative text, story events are often formally represented as a causal graph. When serializing and realizing this causal graph as natural language text, simple approaches produce cumbersome sentences with repetitive syntactic structure, e.g. long chains of “because” clauses. In our research, we show that the fluency of narrative text generated from causal graphs can be improved by applying rule-based grammatical transformations to generate many sentence variations with equivalent semantics, then selecting the variation that has the highest probability using a probabilistic syntactic parser. We evaluate our approach by generating narrative text from causal graphs that encode 100 brief stories involving the same three characters, based on a classic film of experimental social psychology. Crowdsourced workers judged the writing quality of texts generated with ranked transformations as significantly higher than those without, and not significantly lower than human-authored narratives of the same situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Representation Learning for Speech Emotion Recognition Journal Article
In: Interspeech 2016, pp. 3603–3607, 2016.
@article{ghosh_representation_2016,
title = {Representation Learning for Speech Emotion Recognition},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {https://www.researchgate.net/publication/307889274_Representation_Learning_for_Speech_Emotion_Recognition},
doi = {10.21437},
year = {2016},
date = {2016-09-01},
journal = {Interspeech 2016},
pages = {3603–3607},
abstract = {Speech emotion recognition is an important problem with applications as varied as human-computer interfaces and affective computing. Previous approaches to emotion recognition have mostly focused on extraction of carefully engineered features and have trained simple classifiers for the emotion task. There has been limited effort at representation learning for affect recognition, where features are learnt directly from the signal waveform or spectrum. Prior work also does not investigate the effect of transfer learning from affective attributes such as valence and activation to categorical emotions. In this paper, we investigate emotion recognition from spectrogram features extracted from the speech and glottal flow signals; spectrogram encoding is performed by a stacked autoencoder and an RNN (Recurrent Neural Network) is used for classification of four primary emotions. We perform two experiments to improve RNN training: (1) Representation Learning - Model training on the glottal flow signal to investigate the effect of speaker and phonetic invariant features on classification performance (2) Transfer Learning - RNN training on valence and activation, which is adapted to a four emotion classification task. On the USC-IEMOCAP dataset, our proposed approach achieves a performance comparable to the state of the art speech emotion recognition systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
An Unsupervised Approach to Glottal Inverse Filtering Proceedings Article
In: Proceedings of the 2016 24th European Signal Processing Conference (EUSIPCO), Budapest, Hungary, 2016.
@inproceedings{ghosh_unsupervised_2016,
title = {An Unsupervised Approach to Glottal Inverse Filtering},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2016/papers/1570252319.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 24th European Signal Processing Conference (EUSIPCO)},
address = {Budapest, Hungary},
abstract = {The extraction of the glottal volume velocity waveform from voiced speech is a well-known example of a sparse signal recovery problem. Prior approaches have mostly used well-engineered speech processing or convex L1-optimization methods to solve the inverse filtering problem. In this paper, we describe a novel approach to modeling the human vocal tract using an unsupervised dictionary learning framework. We make the assumption of an all-pole model of the vocal tract, and derive an L1 regularized least squares loss function for the all-pole approximation. To evaluate the quality of the extracted glottal volume velocity waveform, we conduct experiments on real-life speech datasets, which include vowels and multi-speaker phonetically balanced utterances. We find that the unsupervised model learns meaningful dictionaries of vocal tracts, and the proposed data-driven unsupervised framework achieves a performance comparable to the IAIF (Iterative Adaptive Inverse Filtering) glottal flow extraction approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mizukami, Masahiro; Yoshino, Koichiro; Neubig, Graham; Traum, David; Nakamura, Satoshi
Analyzing the Effect of Entrainment on Dialogue Acts Proceedings Article
In: Proceedings of the SIGDIAL 2016 Conference, pp. 310–318, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{mizukami_analyzing_2016,
title = {Analyzing the Effect of Entrainment on Dialogue Acts},
author = {Masahiro Mizukami and Koichiro Yoshino and Graham Neubig and David Traum and Satoshi Nakamura},
url = {http://www.sigdial.org/workshops/conference17/proceedings/pdf/SIGDIAL40.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the SIGDIAL 2016 Conference},
pages = {310–318},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {Entrainment is a factor in dialogue that affects not only human-human but also human-machine interaction. While entrainment on the lexical level is well documented, less is known about how entrainment affects dialogue on a more abstract, structural level. In this paper, we investigate the effect of entrainment on dialogue acts and on lexical choice given dialogue acts, as well as how entrainment changes during a dialogue. We also define a novel measure of entrainment to measure these various types of entrainment. These results may serve as guidelines for dialogue systems that would like to entrain with users in a similar manner.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Pollard, Kimberly A.; Artstein, Ron; Byrne, Brendan; Hill, Susan G.; Voss, Clare; Traum, David
Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards Proceedings Article
In: Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016), Springer, Los Angeles, CA, 2016.
@inproceedings{marge_assessing_2016,
title = {Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards},
author = {Matthew Marge and Claire Bonial and Kimberly A. Pollard and Ron Artstein and Brendan Byrne and Susan G. Hill and Clare Voss and David Traum},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110460.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016)},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {The Wizard-of-Oz (WOz) method is a common experimental technique in virtual agent and human-robot dialogue research for eliciting natural communicative behavior from human partners when full autonomy is not yet possible. For the first phase of our research reported here, wizards play the role of dialogue manager, acting as a robot’s dialogue processing. We describe a novel step within WOz methodology that incorporates two wizards and control sessions: the wizards function much like corpus annotators, being asked to make independent judgments on how the robot should respond when receiving the same verbal commands in separate trials. We show that inter-wizard discussion after the control sessions and the resolution with a reconciled protocol for the follow-on pilot sessions successfully impacts wizard behaviors and significantly aligns their strategies. We conclude that, without control sessions, we would have been unlikely to achieve both the natural diversity of expression that comes with multiple wizards and a better protocol for modeling an automated system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hiraoka, Takuya; Georgila, Kallirroi; Nouri, Elnaz; Traum, David; Nakamura, Satoshi
Reinforcement Learning of Multi-Party Trading Dialog Policies Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 31, 2016, ISSN: 1346-8030.
@article{hiraoka_reinforcement_2016,
title = {Reinforcement Learning of Multi-Party Trading Dialog Policies},
author = {Takuya Hiraoka and Kallirroi Georgila and Elnaz Nouri and David Traum and Satoshi Nakamura},
url = {https://www.jstage.jst.go.jp/article/tjsai/31/4/31_B-FC1/_pdf},
issn = {1346-8030},
year = {2016},
date = {2016-09-01},
journal = {Transactions of the Japanese Society for Artificial Intelligence},
volume = {31},
abstract = {Trading dialogs are a kind of negotiation in which an exchange of ownership of items is discussed, and these kinds of dialogs are pervasive in many situations. Recently, there has been an increasing amount of research on applying reinforcement learning (RL) to negotiation dialog domains. However, in previous research, the focus was on negotiation dialog between two participants only, ignoring cases where negotiation takes place between more than two interlocutors. In this paper, as a first study on multi-party negotiation, we apply RL to a multi-party trading scenario where the dialog system (learner) trades with one, two, or three other agents. We experiment with different RL algorithms and reward functions. We use Q-learning with linear function approximation, least-squares policy iteration, and neural fitted Q iteration. In addition, to make the learning process more efficient, we introduce an incremental reward function. The negotiation strategy of the learner is learned through simulated dialog with trader simulators. In our experiments, we evaluate how the performance of the learner varies depending on the RL algorithm used and the number of traders. Furthermore, we compare the learned dialog policies with two strong hand-crafted baseline dialog policies. Our results show that (1) even in simple multi-party trading dialog tasks, learning an effective negotiation policy is not a straightforward task and requires a lot of experimentation; and (2) the use of neural fitted Q iteration combined with an incremental reward function produces negotiation policies as effective or even better than the policies of the two strong hand-crafted baselines.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rizzo, AA; Lucas, G; Gratch, J; Stratou, G; Morency, L-P; Shilling, R; Hartholt, A; Scherer, S
Clinical interviewing by a virtual human agent with automatic behavior analysis Proceedings Article
In: Proceedings of the 2016 International Conference on Disability, Virtual Reality and Associated Technologies, pp. 57–64, ICDVRAT and the University of Reading, Los Angeles, CA, 2016, ISBN: 978-0-7049-1547-3.
@inproceedings{rizzo_clinical_2016,
title = {Clinical interviewing by a virtual human agent with automatic behavior analysis},
author = {AA Rizzo and G Lucas and J Gratch and G Stratou and L-P Morency and R Shilling and A Hartholt and S Scherer},
url = {http://centaur.reading.ac.uk/66645/8/ICDVRAT2016_Full_Proceedings_11th%20_Conf.pdf},
isbn = {978-0-7049-1547-3},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 International Conference on Disability, Virtual Reality and Associated Technologies},
pages = {57–64},
publisher = {ICDVRAT and the University of Reading},
address = {Los Angeles, CA},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. As well, previous research with SimSensei indicates that users engaging with this automated system have less fear of evaluation and self-disclose more personal information compared to when they believe the VH agent is actually an avatar being operated by a “wizard of oz” human-in-the-loop (Lucas et al., 2014). The current study presents results from a sample of military service members (SMs) who were interviewed within the SimSensei system before and after a deployment to Afghanistan. Results indicate that SMs reveal more PTSD symptoms to the SimSensei VH agent than they self-report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and fewer happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; DeVault, David; Lucas, Gale
The Benefits of Virtual Humans for Teaching Negotiation Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA), 2016, Springer, Los Angeles, CA, 2016.
@inproceedings{gratch_benefits_2016,
title = {The Benefits of Virtual Humans for Teaching Negotiation},
author = {Jonathan Gratch and David DeVault and Gale Lucas},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110276.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA), 2016},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {This article examines the potential for teaching negotiation with virtual humans. Many people find negotiations to be aversive. We conjecture that students may be more comfortable practicing negotiation skills with an agent than with another person. We test this using the Conflict Resolution Agent, a semi-automated virtual human that negotiates with people via natural language. In a between-participants design, we independently manipulated two pedagogically-relevant factors while participants engaged in repeated negotiations with the agent: perceived agency (participants either believed they were negotiating with a computer program or another person) and pedagogical feedback (participants received instructional advice or no advice between negotiations). Findings indicate that novice negotiators were more comfortable negotiating with a computer program (they self-reported more comfort and punished their opponent less often) and expended more effort on the exercise following instructional feedback (both in time spent and in self-reported effort). These findings lend support to the notion of using virtual humans to teach interpersonal skills.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Konovalov, Vasily; Melamud, Oren; Artstein, Ron; Dagan, Ido
Collecting Better Training Data using Biased Agent Policies in Negotiation Dialogues Proceedings Article
In: Proceedings of WOCHAT, the Second Workshop on Chatbots and Conversational Agent Technologies, Zerotype, Los Angeles, 2016.
@inproceedings{konovalov_collecting_2016,
title = {Collecting Better Training Data using Biased Agent Policies in Negotiation Dialogues},
author = {Vasily Konovalov and Oren Melamud and Ron Artstein and Ido Dagan},
url = {http://workshop.colips.org/wochat/documents/RP-270.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of WOCHAT, the Second Workshop on Chatbots and Conversational Agent Technologies},
publisher = {Zerotype},
address = {Los Angeles},
abstract = {When naturally occurring data is characterized by a highly skewed class distribution, supervised learning often benefits from reducing this skew. Human-agent dialogue data is commonly highly skewed when using standard agent policies. Hence, we suggest that agent policies need to be reconsidered in the context of training data collection. Specifically, in this work we implemented biased agent policies that are optimized for data collection in the negotiation domain. Empirical evaluations show that our method is successful in collecting a reasonably balanced corpus in the highly skewed Job-Candidate domain. Furthermore, using this balanced corpus to train a negotiation intent classifier yields notable performance improvements relative to naturally distributed data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Kennington, Casey; DeVault, David; Schlangen, David
Real-Time Understanding of Complex Discriminative Scene Descriptions Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 232–241, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{manuvinakurike_real-time_2016,
title = {Real-Time Understanding of Complex Discriminative Scene Descriptions},
author = {Ramesh Manuvinakurike and Casey Kennington and David DeVault and David Schlangen},
url = {http://www.aclweb.org/anthology/W16-3630},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {232–241},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {Real-world scenes typically have complex structure, and utterances about them consequently do as well. We devise and evaluate a model that processes descriptions of complex configurations of geometric shapes and can identify the described scenes among a set of candidates, including similar distractors. The model works with raw images of scenes, and by design can work word-by-word incrementally. Hence, it can be used in highly-responsive interactive and situated settings. Using a corpus of descriptions from game-play between human subjects (who found this to be a challenging task), we show that reconstruction of description structure in our system contributes to task success and supports the performance of the word-based model of grounded semantics that we use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; Qu, Cheng; Schlangen, David; DeVault, David
Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 252–262, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{manuvinakurike_toward_2016,
title = {Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems},
author = {Ramesh Manuvinakurike and Maike Paetzel and Cheng Qu and David Schlangen and David DeVault},
url = {http://www.aclweb.org/anthology/W16-3632},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {252–262},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {In this paper, we present and evaluate an approach to incremental dialogue act (DA) segmentation and classification. Our approach utilizes prosodic, lexico-syntactic and contextual features, and achieves an encouraging level of performance in offline corpus-based evaluation as well as in simulated human-agent dialogues. Our approach uses a pipeline of sequential processing steps, and we investigate the contribution of different processing steps to DA segmentation errors. We present our results using both existing and new metrics for DA segmentation. The incremental DA segmentation capability described here may help future systems to allow more natural speech from users and enable more natural patterns of interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Tin, Jessica; Brown, Jeremy; Fritzsch, Elisabeth; Gabber, Shirley
Wochat Chatbot User Experience Summary Proceedings Article
In: Proceedings of the 2016 IVA: WOCHAT Workshop, Zerotype, Los Angeles, CA, 2016.
@inproceedings{gordon_wochat_2016,
title = {Wochat Chatbot User Experience Summary},
author = {Carla Gordon and Jessica Tin and Jeremy Brown and Elisabeth Fritzsch and Shirley Gabber},
url = {http://workshop.colips.org/wochat/documents/ST-281.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 IVA: WOCHAT Workshop},
publisher = {Zerotype},
address = {Los Angeles, CA},
abstract = {A team of 5 interns at the USC Institute for Creative Technologies interacted with 5 of the 6 chatbots: IRIS, Sammy, Sarah, TickTock and Joker. Unfortunately no one in our team could get the 6th chatbot, pyEliza, working. We found that there were certainly some chatbots that were better than others, and some of us were surprised by how distinct each bot felt from the others. One member commented on how they felt as though each different chatbot had an individual “voice” so to speak. Others were surprised by just how much of a “personality” the bots seemed to have. Most members of our team cited IRIS as their favorite, in terms of being capable of producing naturalistic conversation, with Sammy taking a close second. However, only one member of the team was able to interact with Sarah and TickTock, but that member cited TickTock as a capable conversation partner, and Sarah as being the best bot on a number of measures including appropriateness of responses and overall conversation cohesiveness. Therefore, perhaps if more members had been able to interact with Sarah and TickTock they may have ranked higher. Lastly, Joker was by far our least favorite, with whom no member of our team was able to have anything resembling a naturalistic or even cohesive conversation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ravi, Satheesh; Artstein, Ron
Language Portability for Dialogue Systems: Translating a Question-Answering System from English into Tamil Proceedings Article
In: Proceedings of the SIGDIAL 2016 Conference, pp. 111–116, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{ravi_language_2016,
title = {Language Portability for Dialogue Systems: Translating a Question-Answering System from English into Tamil},
author = {Satheesh Ravi and Ron Artstein},
url = {http://www.aclweb.org/anthology/W16-3614},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the SIGDIAL 2016 Conference},
pages = {111–116},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {A training and test set for a dialogue system in the form of linked questions and responses is translated from English into Tamil. Accuracy of identifying an appropriate response in Tamil is 79%, compared to the English accuracy of 89%, suggesting that translation can be useful to start up a dialogue system. Machine translation of Tamil inputs into English also results in 79% accuracy. However, machine translation of the English training data into Tamil results in a drop in accuracy to 54% when tested on manually authored Tamil, indicating that there is still a large gap before machine translated dialogue systems can interact with human users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krämer, Nicole C.; Karacora, Bilge; Lucas, Gale; Dehghani, Morteza; Rüther, Gina; Gratch, Jonathan
Closing the gender gap in STEM with friendly male instructors? On the effects of rapport behavior and gender of a virtual agent in an instructional interaction Journal Article
In: Computers & Education, vol. 99, pp. 1–13, 2016, ISSN: 0360-1315.
@article{kramer_closing_2016,
title = {Closing the gender gap in STEM with friendly male instructors? On the effects of rapport behavior and gender of a virtual agent in an instructional interaction},
author = {Nicole C. Krämer and Bilge Karacora and Gale Lucas and Morteza Dehghani and Gina Rüther and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0360131516300835},
doi = {10.1016/j.compedu.2016.04.002},
issn = {03601315},
year = {2016},
date = {2016-08-01},
journal = {Computers & Education},
volume = {99},
pages = {1–13},
abstract = {While numerous research endeavors address the effects of pedagogical agents, the role of the agent’s gender and its rapport behavior has been neglected. We hypothesize that a minimal amount of behavioral realism induced by display of rapport is necessary for any social effects to occur in human-computer interaction. Further, in line with results from STEM research on female role models, we assume that especially for female learners a same sex agent will be beneficial. In a 2(student gender)x2(agent gender)x2(rapport behavior yes/no) between subjects design, we investigate whether virtual agents can help enhance participants’ performance, effort and motivation in mathematics. Female and male participants (N = 128) interacted with a male or female virtual agent that either displayed rapport or no rapport. Our results confirm the expected main effect of rapport. However, against expectations, our results do not support the assumption that a same sex agent is beneficial for female learners. Participants’ performance and effort were significantly enhanced when interacting with an agent of opposite gender that displayed rapport. Our results have implications on designing agents for education and training purposes.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Rethinking Sigma’s Graphical Architecture: An Extension to Neural Networks Proceedings Article
In: International Conference on Artificial General Intelligence, pp. 84–94, Springer, New York, NY, 2016, ISBN: 978-3-319-41649-6.
@inproceedings{rosenbloom_rethinking_2016,
title = {Rethinking Sigma’s Graphical Architecture: An Extension to Neural Networks},
author = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
url = {http://link.springer.com/chapter/10.1007/978-3-319-41649-6_9},
doi = {10.1007/978-3-319-41649-6_9},
isbn = {978-3-319-41649-6},
year = {2016},
date = {2016-07-01},
booktitle = {International Conference on Artificial General Intelligence},
volume = {9782},
pages = {84–94},
publisher = {Springer},
address = {New York, NY},
abstract = {The status of Sigma’s grounding in graphical models is challenged by the ways in which their semantics has been violated while incorporating rule-based reasoning into them. This has led to a rethinking of what goes on in its graphical architecture, with results that include a straightforward extension to feedforward neural networks (although not yet with learning).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
Melo, Celso M.; Gratch, Jonathan
The Effect of Color on Expression of Joy and Sadness in Virtual Humans Proceedings Article
In: Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), Amsterdam, The Netherlands, 2009.
@inproceedings{de_melo_effect_2009,
title = {The Effect of Color on Expression of Joy and Sadness in Virtual Humans},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Color%20on%20Expression%20of%20Joy%20and%20Sadness%20in%20Virtual%20Humans.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {For centuries artists have been exploring color to express emotions. Following this insight, the paper describes an approach to learn how to use color to influence the perception of emotions in virtual humans. First, a model of lighting and filters inspired by the visual arts is integrated with a virtual human platform to manipulate color. Next, an evolutionary model, based on genetic algorithms, is created to evolve mappings between emotions and lighting and filter parameters. A first study is, then, conducted where subjects evolve mappings for joy and sadness without being aware of the evolutionary model. In a second study, the features which characterize the mappings are analyzed. Results show that virtual human images of joy tend to be brighter, more saturated and have more colors than images of sadness. The paper discusses the relevance of the results for the fields of expression of emotions and virtual humans.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Gratch, Jonathan
Rapport and Facial Expression Proceedings Article
In: 3rd International Conference on Affective Computing and Intelligent Interaction (ACII 2009), Amsterdam, The Netherlands, 2009.
@inproceedings{wang_rapport_2009,
title = {Rapport and Facial Expression},
author = {Ning Wang and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Rapport%20and%20Facial%20Expression.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {3rd International Conference on Affective Computing and Intelligent Interaction (ACII 2009)},
address = {Amsterdam, The Netherlands},
abstract = {How to build virtual agents that establish rapport with humans? According to Tickle-Degnen and Rosenthal [4], the three essential components of rapport are mutual attentiveness, positivity and coordination. In our previous work, we designed an embodied virtual agent to establish rapport with a human speaker by providing rapid and contingent nonverbal feedback [13] [22]. How do we know that a human speaker is feeling a sense of rapport? In this paper, we focus on the positivity component of rapport by investigating the relationship of human speakers' facial expressions on the establishment of rapport. We used an automatic facial expression coding tool called CERT to analyze the human dyad interactions and human-virtual human interactions. Results show that recognizing positive facial displays alone may be insufficient and that recognized negative facial displays was more diagnostic in assessing the level of rapport between participants.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Gratch, Jonathan
Interactants' Most Intimate Self-Disclosure in Interactions with Virtual Humans Proceedings Article
In: 9th International Conference on Intelligent Virtual Agents, Amsterdam, The Netherlands, 2009.
@inproceedings{kang_interactants_2009,
title = {Interactants' Most Intimate Self-Disclosure in Interactions with Virtual Humans},
author = {Sin-Hwa Kang and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Interactants%E2%80%99%20Most%20Intimate%20Self-Disclosure%20in%20Interactions%20with%20Virtual%20Humans.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {9th International Conference on Intelligent Virtual Agents},
address = {Amsterdam, The Netherlands},
abstract = {This study explored the effect of the combination of visual fidelity of a virtual human and interactants' anticipated future interaction on self-disclosure in emotionally engaged and synchronous communication. The preliminary results were compared between interactions with embodied virtual agents and with real humans. We particularly aimed at investigating ways to allow interactants' intimate self-disclosure while securing their anonymity, even with minimal cues of an embodied virtual agent, when interactants anticipate their future interaction with interaction partners. The results of preliminary data analysis showed that interactants revealed intimate information about their most common sexual fantasy when they had anticipated future interaction with their interaction partners.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Wang, Ning
Assessing the validity of a computational model of emotional coping Proceedings Article
In: International Conference on Affective Computing and Intelligent Interaction, Amsterdam, The Netherlands, 2009.
@inproceedings{marsella_assessing_2009,
title = {Assessing the validity of a computational model of emotional coping},
author = {Stacy C. Marsella and Jonathan Gratch and Ning Wang},
url = {http://ict.usc.edu/pubs/Assessing%20the%20validity%20of%20a%20computational%20model%20of%20emotional%20coping.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {International Conference on Affective Computing and Intelligent Interaction},
address = {Amsterdam, The Netherlands},
abstract = {In this paper we describe the results of a rigorous empirical study evaluating the coping responses of a computational model of emotion. We discuss three key kinds of coping, Wishful Thinking, Resignation and Distancing that impact an agent's beliefs, intentions and desires, and compare these coping responses to related work in the attitude change literature. We discuss the EMA computational model of emotion and identify several hypotheses it makes concerning these coping processes. We assess these hypotheses against the behavior of human subjects playing a competitive board game, using monetary gains and losses to induce emotion and coping. Subject's appraisals, emotional state and coping responses were indexed at key points throughout a game, revealing a pattern of subject's altering their beliefs, desires and intentions as the game unfolds. The results clearly support several of the hypotheses on coping responses but also identify (a) extensions to how EMA models Wishful Thinking as well as (b) individual differences in subject's coping responses.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pütten, Astrid M.; Gratch, Jonathan; Kang, Sin-Hwa; Krämer, Nicole
It doesn't matter what you are! Comparing interacting with an autonomous virtual person with interacting with a virtually represented human Proceedings Article
In: Proceedings of the 6th Conference of the Media Psychology Division of the German Psychological Society, University of Duisberg-Essen, Germany, 2009.
@inproceedings{von_der_putten_it_2009,
title = {It doesn't matter what you are! Comparing interacting with an autonomous virtual person with interacting with a virtually represented human},
author = {Astrid M. Pütten and Jonathan Gratch and Sin-Hwa Kang and Nicole Krämer},
url = {http://ict.usc.edu/pubs/It%20doesnt%20matter%20what%20you%20are.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {Proceedings of the 6th Conference of the Media Psychology Division of the German Psychological Society},
address = {University of Duisberg-Essen, Germany},
abstract = {According to the Threshold Model of Social Influence (Blascovich et al., 2002) the social influence of real persons will always be high, whereas the influence of an artificial entity depends on the realism of its behavior. Contrariwise, the Ethopoeia concept (Nass & Moon, 2000) predicts that automatic social reactions are triggered by situations as soon as they include social cues. The presented study evaluates whether the participants' belief in interacting with either an avatar (a virtual representation of a human) or an agent (autonomous virtual person) leads to different social effects.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul
Towards a New Cognitive Hourglass: Uniform Implementation of Cognitive Architecture via Factor Graphs Proceedings Article
In: Proceedings of the 9th International Conference on Cognitive Modeling (ICCM 2009), Manchester, UK, 2009.
@inproceedings{rosenbloom_towards_2009,
title = {Towards a New Cognitive Hourglass: Uniform Implementation of Cognitive Architecture via Factor Graphs},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Towards%20a%20New%20Cognitive%20Hourglass.pdf},
year = {2009},
date = {2009-07-01},
booktitle = {Proceedings of the 9th International Conference on Cognitive Modeling (ICCM 2009)},
address = {Manchester, UK},
abstract = {As cognitive architectures become ever more ambitious in the range of phenomena they are to assist in producing and modeling, there is increasing pressure for diversity in the mechanisms they embody. Yet uniformity remains critical for both elegance and extensibility. Here, the search for uniformity is continued, but shifted downwards in the cognitive hierarchy to the implementation level. Factor graphs are explored as a promising core, with initial steps towards a reimplementation of Soar. The ultimate aim is a uniform implementation level for cognitive architectures affording both heightened elegance and expanded coverage.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Gratch, Jonathan
Can Virtual Human Build Rapport and Promote Learning? Proceedings Article
In: Annual Conference on Artificial Intelligence in Education, Brighton UK, 2009.
@inproceedings{wang_can_2009,
title = {Can Virtual Human Build Rapport and Promote Learning?},
author = {Ning Wang and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Can%20Virtual%20Human%20Build%20Rapport%20and%20Promote%20Learning.pdf},
year = {2009},
date = {2009-07-01},
booktitle = {Annual Conference on Artificial Intelligence in Education},
address = {Brighton UK},
abstract = {Research shows that teacher’s nonverbal immediacy can have a positive impact on student’s cognitive learning and affect [31]. This paper investigates the effectiveness of nonverbal immediacy using a virtual human. The virtual human attempts to use immediacy feedback to create rapport with the learner. Results show that the virtual human established rapport with learners but did not help them achieve better learning results. The results also suggest that creating rapport is related to higher self-efficacy, and self-efficacy is related to better learning results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Whitman, Nicolle; Traum, David; Artstein, Ron
An Integrated Authoring Tool for Tactical Questioning Dialogue Systems Proceedings Article
In: Workshop on Knowledge and Reasoning in Practical Dialogue Systems, Pasadena, CA, 2009.
@inproceedings{gandhe_integrated_2009,
title = {An Integrated Authoring Tool for Tactical Questioning Dialogue Systems},
author = {Sudeep Gandhe and Nicolle Whitman and David Traum and Ron Artstein},
url = {http://ict.usc.edu/pubs/An%20Integrated%20Authoring%20Tool%20for%20Tactical%20Questioning%20Dialogue%20Systems.pdf},
year = {2009},
date = {2009-07-01},
booktitle = {Workshop on Knowledge and Reasoning in Practical Dialogue Systems},
address = {Pasadena, CA},
abstract = {We present an integrated authoring tool for rapid prototyping of dialogue systems for virtual humans taking part in tactical questioning simulations. The tool helps domain experts, who may have little or no knowledge of linguistics or computer science, to build virtual characters that can play the role of the interviewee. Working in a top-down fashion, the authoring process begins with specifying a domain of knowledge for the character; the authoring tool generates all relevant dialogue acts and allows authors to assign the language that will be used to refer to the domain elements. The authoring tool can also be used to manipulate some aspects of the dialogue strategies employed by the virtual characters, and it also supports re-using some of the authored content across different characters.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Models of Culture for Virtual Human Conversation Proceedings Article
In: Human Computer Interaction International (HCII), pp. 434–440, San Diego, CA, 2009.
@inproceedings{traum_models_2009,
title = {Models of Culture for Virtual Human Conversation},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Models%20of%20Culture%20for%20Virtual%20Human%20Conversation.pdf},
year = {2009},
date = {2009-07-01},
booktitle = {Human Computer Interaction International (HCII)},
pages = {434–440},
address = {San Diego, CA},
abstract = {In this paper, we survey different types of Models of culture for virtual humans. Virtual humans are artificial agents that include both a visual human-like body and intelligent cognition driving action of the body. Culture covers a wide range of common knowledge of behavior and communication that can be used in a number of ways including interpreting the meaning of action, establishing identity, expressing meaning, and inference about the performer. We look at several examples of existing cultural models and point out remaining steps for a more full model of culture.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gandhe, Sudeep; Rushforth, Michael; Traum, David
Viability of a Simple Dialogue Act Scheme for a Tactical Questioning Dialogue System Proceedings Article
In: DiaHolmia 2009, the 13th Annual Workshop on the Semantics and Pragmatics of Dialogue, pp. 43–50, Stockholm, Sweden, 2009.
@inproceedings{artstein_viability_2009,
title = {Viability of a Simple Dialogue Act Scheme for a Tactical Questioning Dialogue System},
author = {Ron Artstein and Sudeep Gandhe and Michael Rushforth and David Traum},
url = {http://ict.usc.edu/pubs/Viability%20of%20a%20Simple%20Dialogue%20Act%20Scheme%20for%20a%20Tactical%20Questioning%20Dialogue%20System.pdf},
year = {2009},
date = {2009-06-01},
booktitle = {DiaHolmia 2009, the 13th Annual Workshop on the Semantics and Pragmatics of Dialogue},
pages = {43–50},
address = {Stockholm, Sweden},
abstract = {User utterances in a spoken dialogue system for tactical questioning simulation were matched to a set of dialogue acts generated automatically from a representation of facts as ⟨object, attribute, value⟩ triples and actions as ⟨character, action⟩ pairs. The representation currently covers about 50% of user utterances, and we show that a few extensions can increase coverage to 80% or more. This demonstrates the viability of simple schemes for representing question-answering dialogues in implemented systems.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Gratch, Jonathan
Don't Just Stare at Me! Proceedings Article
In: 28th ACM Conference on Human Factors in Computing Systems, Chicago, IL, 2009.
@inproceedings{wang_dont_2009,
title = {Don't Just Stare at Me!},
author = {Ning Wang and Jonathan Gratch},
url = {http://www.ict.usc.edu/pubs/Don't%20Just%20Stare%20at%20Me.pdf},
year = {2009},
date = {2009-06-01},
booktitle = {28th ACM Conference on Human Factors in Computing Systems},
address = {Chicago, IL},
abstract = {Communication is more effective and persuasive when participants establish rapport. Tickle-Degnen and Rosenthal [57] argue rapport arises when participants exhibit mutual attentiveness, positivity and coordination. In this paper, we investigate how these factors relate to perceptions of rapport when users interact via avatars in virtual worlds. In this study, participants told a story to what they believed was the avatar of another participant. In fact, the avatar was a computer program that systematically manipulated levels of attentiveness, positivity and coordination. In contrast to Tickle-Degnen and Rosenthal's findings, high levels of mutual attentiveness alone can dramatically lower perceptions of rapport in avatar communication. Indeed, an agent that attempted to maximize mutual attention performed as poorly as an agent that was designed to convey boredom. Adding positivity and coordination to mutual attentiveness, on the other hand, greatly improved rapport. This work unveils the dependencies between components of rapport and informs the design of agents and avatars in computer mediated communication.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Sagae, Kenji; Christian, Gwen; DeVault, David; Traum, David
Towards Natural Language Understanding of Partial Speech Recognition Results in Dialogue Systems Proceedings Article
In: Short Paper Proceedings of NAACL HLT 2009, Boulder, CO, 2009.
@inproceedings{sagae_towards_2009,
title = {Towards Natural Language Understanding of Partial Speech Recognition Results in Dialogue Systems},
author = {Kenji Sagae and Gwen Christian and David DeVault and David Traum},
url = {http://ict.usc.edu/pubs/Towards%20Natural%20Language%20Understanding%20of%20Partial%20Speech%20Recognition%20Results%20in%20Dialogue%20Systems.pdf},
year = {2009},
date = {2009-06-01},
booktitle = {Short Paper Proceedings of NAACL HLT 2009},
address = {Boulder, CO},
abstract = {We investigate natural language understanding of partial speech recognition results to equip a dialogue system with incremental language processing capabilities for more realistic human-computer conversations. We show that relatively high accuracy can be achieved in understanding of spontaneous utterances before utterances are completed.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
op den Akker, Rieks; Traum, David
A comparison of addressee detection methods for multiparty conversations Proceedings Article
In: DiaHolmia 2009, the 13th Annual Workshop on the Semantics and Pragmatics of Dialogue, Stockholm, Sweden, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{op_den_akker_comparison_2009,
title = {A comparison of addressee detection methods for multiparty conversations},
author = {Rieks op den Akker and David Traum},
url = {http://ict.usc.edu/pubs/A%20comparison%20of%20addressee%20detection%20methods%20for%20multiparty%20conversations.pdf},
year = {2009},
date = {2009-06-01},
booktitle = {DiaHolmia 2009, the 13th Annual Workshop on the Semantics and Pragmatics of Dialogue},
address = {Stockholm, Sweden},
abstract = {Several algorithms have recently been proposed for recognizing addressees in a group conversational setting. These algorithms can rely on a variety of factors including previous conversational roles, gaze, and type of dialogue act. Both statistical supervised machine learning algorithms as well as rule-based methods have been developed. In this paper, we compare several algorithms developed for several different genres of multiparty dialogue, and propose a new synthesis algorithm that matches the performance of machine learning algorithms while maintaining the transparency of semantically meaningful rule-based algorithms.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Watt, James H.; Gratch, Jonathan
Associations between Interactants' Personality Traits and Their Feelings of Rapport in Interactions with Virtual Humans Proceedings Article
In: International Communication Association Conference, Chicago, IL, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kang_associations_2009,
title = {Associations between Interactants' Personality Traits and Their Feelings of Rapport in Interactions with Virtual Humans},
author = {Sin-Hwa Kang and James H. Watt and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Associations%20between%20Interactants%E2%80%99%20Personality%20Traits%20and%20Their%20Feelings%20of%20Rapport%20in%20Interactions%20with%20Virtual%20Humans.pdf},
year = {2009},
date = {2009-05-01},
booktitle = {International Communication Association Conference},
address = {Chicago, IL},
abstract = {This study explored associations between the personality traits of human subjects and their feelings of rapport when they interacted with either a virtual agent or a real human. The animated graphical agent, the Responsive agent, responded to real human subjects' storytelling behavior, using appropriately timed nonverbal (contingent) feedback. Interactants' personality factors of Extroversion, Agreeableness, Conscientiousness, and Openness were related to three self-reported components of rapport: Positivity, Attentiveness, and Coordination; and to three behavioral indications of rapport: Meaningful Words, Disfluency, and Prolonged Words. The results revealed that subjects who scored higher on Conscientiousness reported higher rapport when interacting with another human, while subjects who scored higher on Agreeableness reported higher rapport while interacting with a virtual agent. The effects of these personality variables differed significantly across the two experimental groups. The conclusions provide a step toward further development of rapport theory that contributes to enhancing the interactional fidelity of virtual humans.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gandhe, Sudeep; Gerten, Jillian; Leuski, Anton; Traum, David
Semi-formal Evaluation of Conversational Characters Proceedings Article
In: Grumberg, Orna; Kaminski, Michael; Katz, Shmuel; Wintner, Shuly (Ed.): Lecture Notes in Computer Science, pp. 22–35, Springer, Berlin, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{artstein_semi-formal_2009,
title = {Semi-formal Evaluation of Conversational Characters},
author = {Ron Artstein and Sudeep Gandhe and Jillian Gerten and Anton Leuski and David Traum},
editor = {Orna Grumberg and Michael Kaminski and Shmuel Katz and Shuly Wintner},
url = {http://ict.usc.edu/pubs/Semi-formal%20Evaluation%20of%20Conversational%20Characters.pdf},
year = {2009},
date = {2009-05-01},
booktitle = {Lecture Notes in Computer Science},
volume = {5533},
pages = {22–35},
publisher = {Springer},
address = {Berlin},
abstract = {Conversational dialogue systems cannot be evaluated in a fully formal manner, because dialogue is heavily dependent on context and current dialogue theory is not precise enough to specify a target output ahead of time. Instead, we evaluate dialogue systems in a semi-formal manner, using human judges to rate the coherence of a conversational character and correlating these judgments with measures extracted from within the system. We present a series of three evaluations of a single conversational character over the course of a year, demonstrating how this kind of evaluation helps bring about an improvement in overall dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Gratch, Jonathan; Watt, James H.
The Effect of Affective Iconic Realism on Anonymous Interactants' Self-Disclosure Proceedings Article
In: Proceedings of the 27th ACM Computer-Human Interaction Conference, Boston, MA, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kang_effect_2009,
title = {The Effect of Affective Iconic Realism on Anonymous Interactants' Self-Disclosure},
author = {Sin-Hwa Kang and Jonathan Gratch and James H. Watt},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affective%20Iconic%20Realism%20on%20Anonymous%20Interactants%E2%80%99%20Self-Disclosure.pdf},
year = {2009},
date = {2009-04-01},
booktitle = {Proceedings of the 27th ACM Computer-Human Interaction Conference},
address = {Boston, MA},
abstract = {In this paper, we describe progress in research designed to explore the effect of the combination of avatars' visual fidelity and users' anticipated future interaction on self-disclosure in emotionally engaged and synchronous communication. We particularly aim at exploring ways to allow users' self-disclosure while securing their anonymity, even with minimal cues of a virtual human, when users anticipate future interaction. The research investigates users' self-disclosure through measuring their behaviors and feelings of social presence in several dimensions. Design and implementation of the stimulus materials and equipment are complete and data collection has begun.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
EMA: A process model of appraisal dynamics Journal Article
In: Journal of Cognitive Systems Research, vol. 10, no. 1, pp. 70–90, 2009.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{marsella_ema_2009,
title = {EMA: A process model of appraisal dynamics},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/EMA-%20A%20process%20model%20of%20appraisal%20dynamics.pdf},
year = {2009},
date = {2009-03-01},
journal = {Journal of Cognitive Systems Research},
volume = {10},
number = {1},
pages = {70–90},
abstract = {A computational model of emotion must explain both the rapid dynamics of some emotional reactions as well as the slower responses that follow deliberation. This is often addressed by positing multiple levels of appraisal processes such as fast pattern directed vs. slower deliberative appraisals. In our view, this confuses appraisal with inference. Rather, we argue for a single and automatic appraisal process that operates over a person's interpretation of their relationship to the environment. Dynamics arise from perceptual and inferential processes operating on this interpretation (including deliberative and reactive processes). This article discusses current developments in a computational model of emotion processes and illustrates how a single-level model of appraisal obviates a multi-level approach within the context of modeling a naturalistic emotional situation.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rushforth, Michael; Gandhe, Sudeep; Artstein, Ron; Roque, Antonio; Ali, Sarrah; Whitman, Nicolle; Traum, David
Varying Personality in Spoken Dialogue with a Virtual Human Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference (IVA-09), 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{rushforth_varying_2009-1,
title = {Varying Personality in Spoken Dialogue with a Virtual Human},
author = {Michael Rushforth and Sudeep Gandhe and Ron Artstein and Antonio Roque and Sarrah Ali and Nicolle Whitman and David Traum},
url = {http://ict.usc.edu/pubs/Varying%20Personality%20in%20Spoken%20Dialogue%20with%20a%20Virtual%20Human.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference (IVA-09)},
abstract = {We extend a virtual human architecture that has been used to build tactical questioning characters with a parameterizable personality model, allowing characters to be designed with different personalities, allowing a richer set of possible user interactions in a training environment. Two experiments were carried out to evaluate the framework. In the first, it was determined that personality models do have an impact on user perception of several aspects of the personality of the character. In the second, a model of assertiveness was evaluated and found to have a small but significant impact on the users who interacted with the full virtual human, and larger differences in judgement of annotators who examined only the verbal transcripts of the interaction.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Creative Expression of Emotions in Virtual Humans Proceedings Article
In: Proceedings of the International Conference on the Foundations of Digital Games, Port Canaveral, FL, 2009.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{de_melo_creative_2009,
title = {Creative Expression of Emotions in Virtual Humans},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://www.ict.usc.edu/pubs/Creative%20Expression%20of%20Emotions%20in%20Virtual%20Humans.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the International Conference on the Foundations of Digital Games},
address = {Port Canaveral, FL},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Newman, Brad; Parsons, Thomas D.; Difede, JoAnn; Reger, Greg; Holloway, Kevin; Gahm, Greg; McLay, Robert N.; Johnston, Scott; Graap, Ken; Spitalnick, Josh; Bordnick, Patrick; Rothbaum, Barbara O.
Development and Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD Proceedings Article
In: Proceedings of IEEE Explore: Virtual Rehabilitation 2009, 2009.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_development_2009,
title = {Development and Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD},
author = {Albert Rizzo and Brad Newman and Thomas D. Parsons and JoAnn Difede and Greg Reger and Kevin Holloway and Greg Gahm and Robert N. McLay and Scott Johnston and Ken Graap and Josh Spitalnick and Patrick Bordnick and Barbara O. Rothbaum},
url = {http://ict.usc.edu/pubs/Development%20and%20Clinical%20Results%20from%20the%20Virtual%20Iraq%20Exposure%20Therapy%20Application%20for%20PTSD.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of IEEE Explore: Virtual Rehabilitation 2009},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by exposure to an extreme traumatic stressor involving direct personal experience of (or witnessing/learning about) an event that involves actual or threatened death or serious injury, or other threat to one's physical integrity including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Such incidents would be distressing to almost anyone, and are usually experienced with intense fear, horror, and helplessness. Initial data suggests that at least 1 out of 5 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. The current paper will present the rationale and description of a VR PTSD therapy application (Virtual Iraq/Afghanistan) and present initial findings from a number of early studies of its use with active duty service members. Virtual Iraq/Afghanistan consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Results from an open clinical trial using Virtual Iraq with 20 treatment completers indicated that 16 no longer met PTSD diagnostic criteria at post-treatment, with only one not maintaining treatment gains at 3 month follow-up.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Wang, Ning; Stankovic, Brooke
Assessing the validity of appraisal-based models of emotion Proceedings Article
In: Proceedings of the International Conference on Affective Computing and Intelligent Interaction (ACII), Amsterdam, The Netherlands, 2009.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_assessing_2009,
title = {Assessing the validity of appraisal-based models of emotion},
author = {Jonathan Gratch and Stacy C. Marsella and Ning Wang and Brooke Stankovic},
url = {http://ict.usc.edu/pubs/Assessing%20the%20validity%20of%20appraisal-based%20models%20of%20emotion.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the International Conference on Affective Computing and Intelligent Interaction (ACII)},
address = {Amsterdam, The Netherlands},
abstract = {We describe an empirical study comparing the accuracy of competing computational models of emotion in predicting human emotional responses in naturalistic emotion-eliciting situations. The results find clear differences in the models' ability to forecast human emotional responses, and provide guidance on how to develop more accurate models of human emotion.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Kok, Iwan; Gratch, Jonathan
A Probabilistic Multimodal Approach for Predicting Listener Backchannels Journal Article
In: Journal of Autonomous Agents and Multi-Agent Systems, vol. 20, no. 1, pp. 70–84, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{morency_probabilistic_2009,
title = {A Probabilistic Multimodal Approach for Predicting Listener Backchannels},
author = {Louis-Philippe Morency and Iwan Kok and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Probabilistic%20Multimodal%20Approach%20for%20Predicting%20Listener%20Backchannels.pdf},
year = {2009},
date = {2009-01-01},
journal = {Journal of Autonomous Agents and Multi-Agent Systems},
volume = {20},
number = {1},
pages = {70–84},
abstract = {During face-to-face interactions, listeners use backchannel feedback such as head nods as a signal to the speaker that the communication is working and that they should continue speaking. Predicting these backchannel opportunities is an important milestone for building engaging and natural virtual humans. In this paper we show how sequential probabilistic models (e.g., Hidden Markov Model or Conditional Random Fields) can automatically learn from a database of human-to-human interactions to predict listener backchannels using the speaker's multimodal output features (e.g., prosody, spoken words and eye gaze). The main challenges addressed in this paper are automatic selection of the relevant features and optimal feature representation for probabilistic models. For prediction of visual backchannel cues (i.e., head nods), our prediction model shows a statistically significant improvement over a previously published approach based on hand-crafted rules.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Evaluation of Novice and Expert Interpersonal Interaction Skills with a Virtual Patient Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), pp. 511–512, Amsterdam, 2009.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_evaluation_2009,
title = {Evaluation of Novice and Expert Interpersonal Interaction Skills with a Virtual Patient},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Novice%20and%20Expert%20Interpersonal%20Interaction%20Skills%20with%20a%20Virtual%20Patient.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
volume = {5773},
pages = {511–512},
address = {Amsterdam},
abstract = {Interactive Virtual Standardized Patients (VPs) can provide meaningful training for clinicians. These VPs portray interactive embodied conversational characters with realistic representations of a mental or physical problem to be diagnosed or discussed. This research continues the evaluation of our VP "Justina", who suffers from Posttraumatic Stress Disorder (PTSD) from a sexual attack. It presents the results of comparing novices (test subjects without medical training) and experts interacting with "Justina" to find out whether they could elicit the proper responses to make a diagnosis, and investigates the topics and questions the novices asked for coverage of the categories and criteria of PTSD as defined in the DSM-IV. It is assumed that novices will perform better than experts; however, the main investigation is to gather empirical data and understand why this is true and how this can be used to improve the system. There have not been, to the authors' knowledge, any studies evaluating experts and non-experts with virtual human characters in the psychological domain.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul
A Graphical Rethinking of the Cognitive Inner Loop Proceedings Article
In: Proceedings of the The First International Workshop on Graphical Representations for Knowledge Representation and Reasoning, 2009.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Virtual Humans
@inproceedings{rosenbloom_graphical_2009,
title = {A Graphical Rethinking of the Cognitive Inner Loop},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/A%20Graphical%20Rethinking%20of%20the%20Cognitive%20Inner%20Loop.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the The First International Workshop on Graphical Representations for Knowledge Representation and Reasoning},
abstract = {Explorations of graphical representation and reasoning have yielded intriguing results spanning symbol, probability and signal processing. Here we explore an integrative application of graphs, as a path towards cognitive architectures of increased elegance, functionality, and extensibility. The specific focus is on steps towards a graphical reimplementation and extension of the cognitive inner loop within the Soar architecture. Alchemy, an implementation of Markov logic, is used for initial experiments, yielding insights into what will ultimately be required for full graphical implementations of enhanced cognitive inner loops.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Kenny, Patrick G.; Cosand, Louise; Iyer, Arvind; Courtney, Chris; Rizzo, Albert
A Virtual Human Agent for Assessing Bias in Novice Therapists Journal Article
In: Medicine Meets Virtual Reality, vol. 17, pp. 253–258, 2009.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{parsons_virtual_2009,
title = {A Virtual Human Agent for Assessing Bias in Novice Therapists},
author = {Thomas D. Parsons and Patrick G. Kenny and Louise Cosand and Arvind Iyer and Chris Courtney and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Agent%20for%20Assessing%20Bias%20in%20Novice%20Therapists.pdf},
doi = {10.3233/978-1-58603-964-6-253},
year = {2009},
date = {2009-01-01},
journal = {Medicine Meets Virtual Reality},
volume = {17},
pages = {253–258},
abstract = {Monitoring the psychological and physiological activity of persons interacting with virtual humans poses exacting measurement challenges. Three experiments are reported in this paper. In these experiments we made use of Virtual Human Agent technology to assess persons' psychological and physiological responses to Virtual Standardized Patients. The first experiment provided support for the usability of the Virtual Standardized Patients through the use of a virtual character emulating an adolescent male with conduct disorder. In the second experiment we further developed the technology and aimed at assessing whether novice mental health clinicians could conduct an interview with a virtual character that emulates an adolescent female who has recently been physically traumatized. The third experiment looked at the usability of Virtual Standardized Patients for eliciting psychophysiological responses following exposure to virtual humans representing different ethnicities.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso M.; Gratch, Jonathan
Expression of Emotions using Wrinkles, Blushing, Sweating and Tears Proceedings Article
In: Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), Amsterdam, The Netherlands, 2009, ISBN: 978-3-642-04379-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{de_melo_expression_2009,
title = {Expression of Emotions using Wrinkles, Blushing, Sweating and Tears},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Expression%20of%20Emotions%20using%20Wrinkles%20Blushing%20Sweating%20and%20Tears.pdf},
doi = {10.1007/978-3-642-04380-2_23},
isbn = {978-3-642-04379-6},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {Wrinkles, blushing, sweating and tears are physiological manifestations of emotions in humans. Therefore, the simulation of these phenomena is important for the goal of building believable virtual humans which interact naturally and effectively with humans. This paper describes a real-time model for the simulation of wrinkles, blushing, sweating and tears. A study is also conducted to assess the influence of the model on the perception of surprise, sadness, anger, shame, pride and fear. The study follows a repeated-measures design where subjects compare how well each emotion is expressed by virtual humans with or without these phenomena. The results reveal a significant positive effect on the perception of surprise, sadness, anger, shame and fear. The relevance of these results is discussed for the fields of virtual humans and expression of emotions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Roque, Antonio; Leuski, Anton; Morie, Jacquelyn; Traum, David
A Virtual Tour Guide for Virtual Worlds Proceedings Article
In: Intelligent Virtual Agents Conference (IVA), Amsterdam, The Netherlands, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans, Virtual Worlds
@inproceedings{jan_virtual_2009,
title = {A Virtual Tour Guide for Virtual Worlds},
author = {Dusan Jan and Antonio Roque and Anton Leuski and Jacquelyn Morie and David Traum},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Tour%20Guide%20for%20Virtual%20Worlds.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Intelligent Virtual Agents Conference (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {In this paper we present an implementation of an embodied conversational agent that serves as a virtual tour guide in Second Life. We show how we combined the abilities of a conversational agent with navigation in the world and present some preliminary evaluation results.},
keywords = {Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Modeling Social Inference in Virtual Agents Proceedings Article
In: AI & Society, pp. 5–11, Trento, Italy, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_modeling_2009,
title = {Modeling Social Inference in Virtual Agents},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20Social%20Inference%20in%20Virtual%20Agents.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {AI & Society},
volume = {24(1)},
pages = {5–11},
address = {Trento, Italy},
abstract = {Social judgment is a social inference process whereby an agent singles out individuals to blame or credit for multi-agent activities. Such inferences are a key aspect of social intelligence that underlie social planning, social learning, natural language pragmatics and computational models of emotion. With the advance of multi-agent interactive systems and the need of designing socially aware systems and interfaces to interact with people, it is increasingly important to model this human-centric form of social inference. Based on psychological attribution theory, this paper presents a general computational framework to automate social inference based on an agent's causal knowledge and observations of interaction.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Gratch, Jonathan; Leuski, Anton; Morency, Louis-Philippe; Marsella, Stacy C.; Liewer, Matt; Doraiswamy, Prathibha; Weiss, Lori; LeMasters, Kim; Fast, Edward; Sadek, Ramy; Marshall, Andrew; Lee, Jina; Thiebaux, Marcus; Tsiartas, Andreas
At the Virtual Frontier: Introducing Gunslinger, a Multi- Character, Mixed-Reality, Story-Driven Experience Proceedings Article
In: Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), Amsterdam, The Netherlands, 2009.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hartholt_at_2009,
title = {At the Virtual Frontier: Introducing Gunslinger, a Multi- Character, Mixed-Reality, Story-Driven Experience},
author = {Arno Hartholt and Jonathan Gratch and Anton Leuski and Louis-Philippe Morency and Stacy C. Marsella and Matt Liewer and Prathibha Doraiswamy and Lori Weiss and Kim LeMasters and Edward Fast and Ramy Sadek and Andrew Marshall and Jina Lee and Marcus Thiebaux and Andreas Tsiartas},
url = {http://ict.usc.edu/pubs/At%20the%20Virtual%20Frontier-%20Introducing%20Gunslinger%20a%20Multi-%20Character%20Mixed-Reality%20Story-Driven%20Experience.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {We describe an application of intelligent virtual agents to the domain of mixed-reality interactive entertainment. Gunslinger allows users to interact with life-sized virtual humans within the context of a wild west story world. The application incorporates a novel integration of capabilities including gesture and spoken language recognition, story and dialogue reasoning, and multi-character, multi-modal behavior generation and synthesis. The article describes our design process, technological innovations, and initial feedback from user interactions with the system.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Kok, Iwan; Gratch, Jonathan
Predicting Listener Backchannels: A Probabilistic Multimodal Approach Proceedings Article
In: Autonomous Agents and Multi-Agent Systems, Tokyo, Japan, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morency_predicting_2009,
title = {Predicting Listener Backchannels: A Probabilistic Multimodal Approach},
author = {Louis-Philippe Morency and Iwan Kok and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Predicting%20Listener%20Backchannels-%20A%20Probabilistic%20Multimodal%20Approach.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Autonomous Agents and Multi-Agent Systems},
address = {Tokyo, Japan},
abstract = {During face-to-face interactions, listeners use backchannel feedback such as head nods as a signal to the speaker that the communication is working and that they should continue speaking. Predicting these backchannel opportunities is an important milestone for building engaging and natural virtual humans. In this paper we show how sequential probabilistic models (e.g., Hidden Markov Model or Conditional Random Fields) can automatically learn from a database of human-to-human interactions to predict listener backchannels using the speaker's multimodal output features (e.g., prosody, spoken words and eye gaze). The main challenges addressed in this paper are automatic selection of the relevant features and optimal feature representation for probabilistic models. For prediction of visual backchannel cues (i.e., head nods), our prediction model shows a statistically significant improvement over a previously published approach based on hand-crafted rules.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
Improving a Virtual Human Using a Model of Degrees of Grounding Proceedings Article
In: Proceedings of International Joint Conference on Artificial Intelligence IJCAI-09, Pasadena, CA, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_improving_2009,
title = {Improving a Virtual Human Using a Model of Degrees of Grounding},
author = {Antonio Roque and David Traum},
url = {http://ict.usc.edu/pubs/Improving%20a%20Virtual%20Human%20Using%20a%20Model%20of%20Degrees%20of%20Grounding.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of International Joint Conference on Artificial Intelligence IJCAI-09},
address = {Pasadena, CA},
abstract = {We describe the Degrees of Grounding model, which tracks the extent to which material has reached mutual belief in a dialogue, and conduct experiments in which the model is used to manage grounding behavior in spoken dialogues with a virtual human. We show that the model produces improvements in virtual human performance as measured by post-session questionnaires.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Benotti, Luciana; Traum, David
A computational account of comparative implicatures for a spoken dialogue agent Proceedings Article
In: Proceedings of the 8th International Conference on Computational Semantics, Tilburg, The Netherlands, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{benotti_computational_2009,
title = {A computational account of comparative implicatures for a spoken dialogue agent},
author = {Luciana Benotti and David Traum},
url = {http://ict.usc.edu/pubs/A%20computational%20account%20of%20comparative%20implicatures%20for%20a%20spoken%20dialogue%20agent.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the 8th International Conference on Computational Semantics},
address = {Tilburg, The Netherlands},
abstract = {Comparative constructions are common in dialogue, especially in negotiative dialogue where a choice must be made between different options, and options must be evaluated using multiple metrics. Comparatives explicitly assert a relationship between two elements along a scale, but they may also implicate positions on the scale especially if constraints on the possible values are present. Dialogue systems must often understand more from a comparative than the explicit assertion in order to understand why the comparative was uttered. In this paper we examine the pragmatic meaning of comparative constructions from a computational perspective.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Petta, Paola
Modeling the Cognitive Antecedents and Consequences of Emotion Journal Article
In: Journal of Cognitive Systems Research, vol. 10, no. 1, pp. 1–5, 2009.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_modeling_2009,
title = {Modeling the Cognitive Antecedents and Consequences of Emotion},
author = {Jonathan Gratch and Stacy C. Marsella and Paola Petta},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Cognitive%20Antecedents%20and%20Consequences%20of%20Emotion.pdf},
year = {2009},
date = {2009-01-01},
journal = {Journal of Cognitive Systems Research},
volume = {10},
number = {1},
pages = {1–5},
abstract = {The last decade has seen an explosion of interest in emotion in both the social and computational sciences. Emotions arise from interactions with both people and technology. They color human perception and decision making and shape a person's moment-to-moment responses to their social and physical environment. Emotions are expressed through changes in speech, facial expression, posture and physiological processes, and these changes provide essential clues to a person's beliefs, desires, intentions and likely future behavior. Recognizing and exploiting such influences can have broad impact across a variety of disciplines: Incorporating the influence of emotion greatly increases explanatory power of models of human decision making (Loewenstein & Lerner, 2003); Responding to a student's emotions can enhance the effectiveness of human or computer tutors (Conati & MacLaren, 2004; Graesser et al., 2008; Lepper, 1988); And modeling emotional influences can enhance the fidelity of social simulations, including how crowds react in disasters (Lyell, Flo, & Mejia-Tellez, 2006; Silverman, Johns, O'Brien, Weaver, & Cornwell, 2002), how military units respond to the stress of battle (Gratch & Marsella, 2003), and even large social situations as when modeling the economic impact of traumatic events such as 9/11 or modeling inter-group conflicts (Marsella, Pynadath, & Read, 2004). More generally, an understanding of the cognitive and social function of human emotion complements the rational, individualistic and disembodied view of cognition that underlies most artificial intelligence and cognitive system research. Emotional influences that seem irrational on the surface may have important social and cognitive functions that would be required by any intelligent system. For example, Herb Simon (1967) theorized that emotions serve to interrupt normal cognition when unattended goals require servicing. Robert Frank argues that social emotions such as anger and guilt reflect a mechanism that improves group utility by minimizing social conflicts, and thereby explains people's "irrational" choices to cooperate in social games such as the prisoner's dilemma (Frank, 1988). Similarly, Alfred Mele (2001) claims that "emotional biases" such as wishful thinking reflect a rational mechanism that more accurately accounts for social costs, such as the cost of betrayal when a parent defends a child despite strong evidence of their guilt in a crime (see also Ito, Pynadath, & Marsella, 2008). At the same time, findings on non-conscious judgments (e.g., Barrett, Ochsner, & Gross, 2007; Moors, De Houwer, Hermans, & Eelen, 2005) have enriched our understanding of how cognitive style is shaped by the socio-emotional context, often in adaptive ways. More broadly, appraisal theorists such as Lazarus (1991), Frijda (1987) and Scherer (2001) have argued that emotions are intimately connected with how organisms sense events, relate them to internal needs (e.g., is this an opportunity or a threat?), characterize appropriate responses (e.g., fight, flight or plan) and recruit the cognitive, physical and social resources needed to adaptively respond. Thus, an understanding of emotion's function can inform the design of cognitive systems that must survive in a dynamic, semi-predictable and social world. This special issue of the Journal of Cognitive Systems Research gives a cross-section of contemporary psychological and computational research on the interplay of cognition and emotion. 
The articles arise from a recent interdisciplinary symposium on Modeling the Cognitive Antecedents and Consequences of Emotion that brought together leaders in psychological and computational approaches to emotion for three days of intense discussion. The articles represent the current state of an ongoing discussion to bridge the divide between computational and psychological perspectives on emotion, illustrating both that theories on the function of emotion in human cognition can yield key insights into the design and control of intelligent entities in general, and that computational models of human mental processes can inform psychological theories through the exercise of concretizing them into working and testable systems.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; DeJong, Gerald
A Decision-theoretic Approach to Adaptive Problem Solving Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, Guimarães, Portugal, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_decision-theoretic_2009,
title = {A Decision-theoretic Approach to Adaptive Problem Solving},
author = {Jonathan Gratch and Gerald DeJong},
url = {http://ict.usc.edu/pubs/A%20Decision-theoretic%20Approach%20to%20Adaptive%20Problem%20Solving.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
address = {Guimarães, Portugal},
abstract = {Computer aided interactive narrative has received increasing attention in recent years. Automated directorial control that manages the development of the story in the face of user interaction is an important aspect of interactive narrative design. Most existing approaches lack an explicit model of the user. This limits the approaches' ability of predicting the user's experience, and hence undermines the effectiveness of the approaches. Thespian is a multi-agent framework for authoring and simulating interactive narratives with explicit models of the user. This work extends Thespian with the ability to provide proactive directorial control using the user model. In this paper, we present the algorithms in detail, followed by examples.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Antonisse, Jamie; Bouchard, Sean; Chance, Eric
Virtual Worlds as a Healing Modality for Returning Soldiers and Veterans Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine; Studies in Health Technology and Informatics, IOS Press, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans, Virtual Worlds
@inproceedings{morie_virtual_2009,
title = {Virtual Worlds as a Healing Modality for Returning Soldiers and Veterans},
author = {Jacquelyn Morie and Jamie Antonisse and Sean Bouchard and Eric Chance},
url = {http://ict.usc.edu/pubs/Virtual%20Worlds%20as%20a%20Healing%20Modality%20for%20Returning%20Soldiers%20and%20Veterans.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine; Studies in Health Technology and Informatics},
volume = {144},
publisher = {IOS Press},
abstract = {Those who have served in recent conflicts face many challenges as they reintegrate into society. In addition to recovering from physical wounds, traumatic brain injury and post-traumatic stress disorders, many soldiers also face basic psychological issues about who they are and how to find their place in a society that has not shared their experiences. To address these challenges, we have created a space that provides ongoing opportunities for healing activities, personal exploration and social camaraderie in an online virtual world, Second Life. In such worlds, where each avatar is controlled by a live individual, experiences can be unintuitive, uninviting, considered boring or difficult to control. To counter this, we are implementing autonomous intelligent agent avatars that can be "on duty" 24/7, serving as guides and information repositories, making the space and activities easy to find and even personalized to the visitor's needs. We report the results of usability testing with an in-world veterans' group. Tests comparing soldiers who use this space as part of their reintegration regimen compared to those who do not are being scheduled as part of the Army's Warriors in Transition program.},
keywords = {Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Difede, JoAnn; Rothbaum, Barbara O.; Johnston, Scott; McLay, Robert N.; Reger, Greg; Gahm, Greg; Parsons, Thomas D.; Graap, Ken; Pair, Jarrell
VR PTSD Exposure Therapy Results with Active Duty OIF/OEF Combatants Journal Article
In: Medicine Meets Virtual Reality, vol. 17, 2009.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_vr_2009,
title = {VR PTSD Exposure Therapy Results with Active Duty OIF/OEF Combatants},
author = {Albert Rizzo and JoAnn Difede and Barbara O. Rothbaum and Scott Johnston and Robert N. McLay and Greg Reger and Greg Gahm and Thomas D. Parsons and Ken Graap and Jarrell Pair},
url = {http://ict.usc.edu/pubs/VR%20PTSD%20Exposure%20Therapy%20Results%20with%20Active%20Duty%20OIF%20OEF%20Combatants.pdf},
year = {2009},
date = {2009-01-01},
journal = {Medicine Meets Virtual Reality},
volume = {17},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Reports indicate that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality exposure therapy has been previously used for PTSD with reports of positive outcomes. This paper will present a brief description of the USC/ICT Virtual Iraq/Afghanistan PTSD therapy application and present clinical outcome data from active duty patients treated at the Naval Medical Center-San Diego (NMCSD) as of October 2009. Initial outcomes from the first twenty patients to complete treatment indicate that 16 no longer meet diagnostic criteria for PTSD at post treatment. Research and clinical tests using the Virtual Iraq/Afghanistan software are also currently underway at Weill Cornell Medical College, Emory University, Fort Lewis and WRAMC along with 20 other test sites.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rushforth, Michael; Gandhe, Sudeep; Artstein, Ron; Roque, Antonio; Ali, Sarrah; Whitman, Nicolle; Traum, David
Varying Personality in Spoken Dialogue with a Virtual Human Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference (IVA-09), 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{rushforth_varying_2009,
title = {Varying Personality in Spoken Dialogue with a Virtual Human},
author = {Michael Rushforth and Sudeep Gandhe and Ron Artstein and Antonio Roque and Sarrah Ali and Nicolle Whitman and David Traum},
url = {http://ict.usc.edu/pubs/Varying%20Personality%20in%20Spoken%20Dialogue%20with%20a%20Virtual%20Human.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference (IVA-09)},
number = {ICT TR 03 2009},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We extend a virtual human architecture that has been used to build tactical questioning characters with a parameterizable personality model, allowing characters to be designed with different personalities, allowing a richer set of possible user interactions in a training environment. Two experiments were carried out to evaluate the framework. In the first, it was determined that personality models do have an impact on user perception of several aspects of the personality of the character. In the second, a model of assertiveness was evaluated and found to have a small but significant impact on the users who interacted with the full virtual human, and larger differences in judgement of annotators who examined only the verbal transcripts of the interaction.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Artstein, Ron; Cannon, Jacob; Gandhe, Sudeep; Gerten, Jillian; Henderer, Joe; Leuski, Anton; Traum, David
Coherence of Off-Topic Response for a Virtual Character Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{artstein_coherence_2008,
title = {Coherence of Off-Topic Response for a Virtual Character},
author = {Ron Artstein and Jacob Cannon and Sudeep Gandhe and Jillian Gerten and Joe Henderer and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/COHERENCE%20OF%20OFF-TOPIC%20RESPONSES%20FOR%20A%20VIRTUAL%20CHARACTER.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {We demonstrate three classes of off-topic responses which allow a virtual question-answering character to handle cases where it does not understand the user's input: ask for clarification, indicate misunderstanding, and move on with the conversation. While falling short of full dialogue management, a combination of such responses together with prompts to change the topic can improve overall dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Parsons, Thomas D.; Pair, Jarrell; McLay, Robert N.; Johnston, Scott; Perlman, Karen; Deal, Robert; Reger, Greg; Gahm, Greg; Roy, Michael; Shilling, Russell; Rothbaum, Barbara O.; Graap, Ken; Spitalnick, Josh; Bordnick, Patrick; Difede, JoAnn
Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_clinical_2008,
title = {Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD},
author = {Albert Rizzo and Thomas D. Parsons and Jarrell Pair and Robert N. McLay and Scott Johnston and Karen Perlman and Robert Deal and Greg Reger and Greg Gahm and Michael Roy and Russell Shilling and Barbara O. Rothbaum and Ken Graap and Josh Spitalnick and Patrick Bordnick and JoAnn Difede},
url = {http://ict.usc.edu/pubs/Clinical%20Results%20from%20the%20Virtual%20Iraq%20Esposure%20Therapy%20Application%20for%20PTSD.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 5 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. The current paper will present the rationale and description of a VR PTSD therapy application (Virtual Iraq) and present initial findings from its use with active duty service members. Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Results from an open clinical trial using Virtual Iraq at the Naval Medical Center-San Diego with 20 treatment completers indicate that 16 no longer met PTSD diagnostic criteria at post-treatment, with only one not maintaining treatment gains at 3 month follow-up.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Traum, David
A Statistical Approach for Text Processing in Virtual Humans Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_statistical_2008,
title = {A Statistical Approach for Text Processing in Virtual Humans},
author = {Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/A%20STATISTICAL%20APPROACH%20FOR%20TEXT%20PROCESSING%20IN%20VIRTUAL%20HUMANS.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {We describe a text classification approach based on statistical language modeling. We show how this approach can be used for several natural language processing tasks in a virtual human system. Specifically, we show it can be applied to language understanding, language generation, and character response selection tasks. We illustrate these applications with some experimental results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe
Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morency_real-time_2008,
title = {Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model},
author = {Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/REAL-TIME%20HEAD%20POSE%20ESTIMATION%20USING%20A%20WEBCAM-%20MONOCULAR%20ADAPTIVE%20VIEW-BASED%20APPEARANCE%20MODEL.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Accurately estimating the person's head position and orientation is an important task for a wide range of applications such as driver awareness and human-robot interaction. Over the past two decades, many approaches have been suggested to solve this problem, each with its own advantages and disadvantages. In this paper, we present a probabilistic framework called Monocular Adaptive View-based Appearance Model (MAVAM) which integrates the advantages from two of these approaches: (1) the relative precision and user-independence of differential registration, and (2) the robustness and bounded drift of keyframe tracking. In our experiments, we show how the MAVAM model can be used to estimate head position and orientation in real-time using a simple monocular camera. Our experiments on two previously published datasets show that the MAVAM framework can accurately track for a long period of time (>2 minutes) with an average accuracy of 3.9 degrees and 1.2 in. when compared with an inertial sensor and a 3D magnetic sensor.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Leuski, Anton; Roque, Antonio; Gandhe, Sudeep; DeVault, David; Gerten, Jillian; Robinson, Susan; Martinovski, Bilyana
Natural Language Dialogue Architectures for Tactical Questioning Characters Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_natural_2008,
title = {Natural Language Dialogue Architectures for Tactical Questioning Characters},
author = {David Traum and Anton Leuski and Antonio Roque and Sudeep Gandhe and David DeVault and Jillian Gerten and Susan Robinson and Bilyana Martinovski},
url = {http://ict.usc.edu/pubs/Natural%20Language%20Dialogue%20Architectures.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper we contrast three architectures for natural language questioning characters. We contrast the relative costs and benefits of each approach in building characters for tactical questioning. The first architecture works purely at the textual level, using cross-language information retrieval techniques to learn the best output for any input from a training set of linked questions and answers. The second architecture adds a global emotional model and computes a compliance model, which can result in different outputs for different levels, given the same inputs. The third architecture works at a semantic level and allows authoring of different policies for response for different kinds of information. We describe these architectures and their strengths and weaknesses with respect to expressive capacity, performance, and authoring demands.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Rizzo, Albert
Virtual Human Patients for Training of Clinical Interview and Communication Skills Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, Maia, Portugal, 2008, ISBN: 07 049 15 00 6.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{parsons_virtual_2008,
title = {Virtual Human Patients for Training of Clinical Interview and Communication Skills},
author = {Thomas D. Parsons and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Human%20Patients%20for%20Training%20of%20Clinical%20Interview%20and%20Communication%20Skills.pdf},
isbn = {07 049 15 00 6},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology},
address = {Maia, Portugal},
abstract = {Although schools commonly make use of standardized patients to teach interview skills, the diversity of the scenarios standardized patients can characterize is limited by availability of human actors. Virtual Human Agent technology has evolved to a point where researchers may begin developing mental health applications that make use of virtual reality patients. The work presented here is a preliminary attempt at what we believe to be a large application area. Herein we describe an ongoing study of our virtual patients. We present an approach that allows novice mental health clinicians to conduct an interview with a virtual character that emulates 1) an adolescent male with conduct disorder; and 2) an adolescent female who has recently been physically traumatized.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Gratch, Jonathan; Hartholt, Arno; Marsella, Stacy C.; Lee, Jina
Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Virtual Agents, pp. 117–130, Tokyo, Japan, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_multi-party_2008,
title = {Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents},
author = {David Traum and Jonathan Gratch and Arno Hartholt and Stacy C. Marsella and Jina Lee},
url = {http://ict.usc.edu/pubs/Multi-party,%20Multi-issue,%20Multi-strategy%20Negotiation.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 8th International Conference on Intelligent Virtual Agents},
pages = {117–130},
address = {Tokyo, Japan},
abstract = {We present a model of negotiation for virtual agents that extends previous work to be more human-like and applicable to a broader range of situations, including more than two negotiators with different goals, and negotiating over multiple options. The agents can dynamically change their negotiating strategies based on the current values of several parameters and factors that can be updated in the course of the negotiation. We have implemented this model and done preliminary evaluation within a prototype training system and a three-party negotiation with two virtual humans and one human.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels Journal Article
In: Lecture Notes in Computer Science, vol. 5208, pp. 484–485, 2008.
@article{de_melo_evolving_2008,
title = {Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20in%20Virtual%20Humans%20Using%20Lights%20and%20Pixels.pdf},
year = {2008},
date = {2008-09-01},
journal = {Lecture Notes in Computer Science},
volume = {5208},
pages = {484–485},
abstract = {Inspired by the arts, this paper addresses the challenge of expressing emotions in virtual humans using the environment's lights and the screen's pixels. An evolutionary approach is proposed which relies on genetic algorithms to learn how to map emotions into these forms of expression. The algorithm evolves populations of hypotheses, where each hypothesis represents a configuration of lighting and screen expression. Hypotheses are evaluated by a critic ensemble composed of artificial and human critics. The need for human critics is motivated by a study which reveals the limitations of an approach that relies only on artificial critics following principles from the art literature. We also address the need for the model to improve with experience and to adapt to individual, social and cultural values in the arts. Finally, a second study is described in which subjects successfully evolved mappings for joy and sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gandhe, Sudeep; DeVault, David; Roque, Antonio; Martinovski, Bilyana; Artstein, Ron; Leuski, Anton; Gerten, Jillian; Traum, David
From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters Proceedings Article
In: Proceedings of InterSpeech, 2008.
@inproceedings{gandhe_domain_2008,
title = {From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters},
author = {Sudeep Gandhe and David DeVault and Antonio Roque and Bilyana Martinovski and Ron Artstein and Anton Leuski and Jillian Gerten and David Traum},
url = {http://ict.usc.edu/pubs/From%20Domain%20Specification%20to%20Virtual%20Humans.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of InterSpeech},
abstract = {We present a new approach for rapidly developing dialogue capabilities for virtual humans. Starting from a domain specification, an integrated authoring interface automatically generates dialogue acts with all possible contents. These dialogue acts are linked to example utterances in order to provide training data for natural language understanding and generation. The virtual human dialogue system contains a dialogue manager following the information-state approach, using finite-state machines and SCXML to manage local coherence, as well as explicit modeling of emotions and compliance level and a grounding component based on evidence of understanding. Using the authoring tools, we design and implement a version of the virtual human Hassan and compare it to previous architectures for the character.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Solomon, Steve; Gratch, Jonathan; Bulitko, Vadim; Lent, Michael
Modeling Culturally and Emotionally Affected Behavior Proceedings Article
In: The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics, Osaka, Japan, 2008.
@inproceedings{solomon_modeling_2008,
title = {Modeling Culturally and Emotionally Affected Behavior},
author = {Steve Solomon and Jonathan Gratch and Vadim Bulitko and Michael Lent},
url = {http://www.ict.usc.edu/pubs/Modeling%20Culturally%20and%20Emotionally%20Affected%20Behavior.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics.},
address = {Osaka, Japan},
abstract = {Culture and emotions have a profound impact on human behavior. Consequently, high-fidelity simulated interactive environments (e.g., trainers and computer games) that involve virtual humans must model socio-cultural and emotional effects on agent behavior. In this paper we discuss two recently fielded systems that do so independently: Culturally Affected Behavior (CAB) and EMotion and Adaptation (EMA). We then propose a simple language that combines the two systems in a natural way, thereby enabling simultaneous simulation of culturally and emotionally affected behavior. The proposed language is based on matrix algebra and can be easily implemented on single- or multi-core hardware with a standard matrix package (e.g., MATLAB or a C++ library). We then show how to extend the combined culture and emotion model with an explicit representation of religion and personality profiles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Virtual Humans for Assisted Health Care Proceedings Article
In: Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings, ACM, Athens, Greece, 2008.
@inproceedings{kenny_virtual_2008-1,
title = {Virtual Humans for Assisted Health Care},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20Assisted%20Health%20Care.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings},
publisher = {ACM},
address = {Athens, Greece},
abstract = {There is a growing need for applications that can dynamically interact with aging populations to gather information, monitor their health care, provide information, or even act as companions. Virtual human agents or virtual characters offer a technology that can enable human users to overcome the confusing interfaces found in current human-computer interactions. These artificially intelligent virtual characters have speech recognition, natural language and vision capabilities that allow human users to interact with their computers in a more natural way. Additionally, sensors may be used to monitor the environment for specific behaviors that can be fused into a virtual human system. As a result, the virtual human may respond to a patient or elderly person in a manner that will have a powerful effect on their living situation. This paper describes the virtual human technology developed and some current applications that apply the technology to virtual patients for mental health diagnosis and clinician training. Additionally, the paper discusses possible ways in which virtual humans may be utilized for assisted health care and for the integration of multi-modal input to enhance the virtual human system.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Blascovich, James J.; Chemers, Martin M.; Hunt, Earl; Ilgen, Daniel R.; Larsen, Randy L.; Mayer, Richard E.; O'Neil, Harold Jr.; McLaughlin, Alan J.; Patel, Vilma L.; Quiñones, Miguel A.; Simons, Anna
Human Behavior in Military Contexts Book
The National Academies Press, 2008.
@book{gratch_human_2008,
title = {Human Behavior in Military Contexts},
author = {Jonathan Gratch and James J. Blascovich and Martin M. Chemers and Earl Hunt and Daniel R. Ilgen and Randy L. Larsen and Richard E. Mayer and Harold Jr. O'Neil and Alan J. McLaughlin and Vilma L. Patel and Miguel A. Quiñones and Anna Simons},
url = {http://www.ict.usc.edu/pubs/Human%20Behavior%20in%20Military%20Contexts.pdf},
year = {2008},
date = {2008-06-01},
publisher = {The National Academies Press},
abstract = {Human behavior forms the nucleus of military effectiveness. Humans operating in the complex military system must possess the knowledge, skills, abilities, aptitudes, and temperament to perform their roles effectively in a reliable and predictable manner, and effective military management requires understanding of how these qualities can be best provided and assessed. Scientific research in this area is critical to understanding leadership, training and other personnel issues, social interactions and organizational structures within the military. The U.S. Army Research Institute for the Behavioral and Social Sciences (ARI) asked the National Research Council to provide an agenda for basic behavioral and social research focused on applications in both the short and long-term. The committee responded by recommending six areas of research on the basis of their relevance, potential impact, and timeliness for military needs: intercultural competence; teams in complex environments; technology-based training; nonverbal behavior; emotion; and behavioral neurophysiology. The committee suggests doubling the current budget for basic research for the behavioral and social sciences across U.S. military research agencies. The additional funds can support approximately 40 new projects per year across the committee's recommended research areas. Human Behavior in Military Contexts includes committee reports and papers that demonstrate areas of stimulating, ongoing research in the behavioral and social sciences that can enrich the military's ability to recruit, train, and enhance the performance of its personnel, both organizationally and in its many roles in other cultures.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {book}
}
Gandhe, Sudeep; Traum, David
An Evaluation Understudy for Dialogue Coherence Models Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, 2008.
@inproceedings{gandhe_evaluation_2008,
title = {An Evaluation Understudy for Dialogue Coherence Models},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/An%20Evaluation%20Understudy%20for%20Dialogue%20Coherence%20Models.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
abstract = {Evaluating a dialogue system is seen as a major challenge within the dialogue research community. Due to the very nature of the task, most evaluation methods need a substantial amount of human involvement. Following the tradition in machine translation, summarization and discourse coherence modeling, we introduce the idea of an evaluation understudy for dialogue coherence models. Following (Lapata, 2006), we use the information ordering task as a testbed for evaluating dialogue coherence models. This paper reports findings about the reliability of the information ordering task as applied to dialogues. We find that simple n-gram co-occurrence statistics similar in spirit to BLEU (Papineni et al., 2001) correlate very well with human judgments of dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}