Publications
Search
Dennison, Mark; Neubauer, Cathy; Passaro, Tony; Harrison, Andre; Scherer, Stefan; Khooshabeh, Pete
Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop, Los Angeles, CA, 2016.
@inproceedings{dennison_using_2016,
title = {Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task},
author = {Mark Dennison and Cathy Neubauer and Tony Passaro and Andre Harrison and Stefan Scherer and Pete Khooshabeh},
url = {http://marksdennison.com/s/DennisonPAVA2016.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop},
address = {Los Angeles, CA},
abstract = {Teams of two individuals worked together in a high-intensity simulated bomb diffusing task. Half the teams were given icebreaker social time to increase comfort and familiarity with each other and the remaining half of the teams served as controls and did not meet until the task began. Electrocardiography and impedance cardiography were recorded to examine cardiac changes during task cooperation. Changes in ventricular contractility showed that individuals who had taken part in the icebreaker showed increased task engagement over time whereas controls showed the opposite. Data also trended to show that icebreaker participants were in a challenge state and controls were in a threat state during the final thirty seconds of bomb defusal. Finally, we show that a set of cardiac features can be used to classify participant data as belonging to the icebreaker or control groups with an accuracy as high as 88%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bresnahan, T.; Rizzo, A.; Burke, S. L.; Partin, M.; Ahlness, R. M.; Trimmer, M.
Using Virtual Interactive Training Agents (VITA) with Adults with Autism and other Developmental Disabilities Proceedings Article
In: Proceedings of the 2016 International Conference on Disability, Virtual Reality, and Associated Technology, pp. 49–56, ICDVRAT and the University of Reading, Los Angeles, CA, 2016, ISBN: 978-0-7049-1547-3.
@inproceedings{bresnahan_using_2016,
title = {Using Virtual Interactive Training Agents ({VITA}) with Adults with Autism and other Developmental Disabilities},
author = {T. Bresnahan and A. Rizzo and S. L. Burke and M. Partin and R. M. Ahlness and M. Trimmer},
url = {http://www.icdvrat.org/2016/papers/ICDVRAT2016_S02N2_Bresnahan_etal.pdf},
isbn = {978-0-7049-1547-3},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 International Conference on Disability, Virtual Reality, and Associated Technology},
pages = {49--56},
publisher = {ICDVRAT and the University of Reading},
address = {Los Angeles, CA},
abstract = {Conversational Virtual Human (VH) agents are increasingly being used to support role-play experiential learning across a range of use-cases and populations. This project examined whether use of the Virtual Interactive Training Agent (VITA) system would improve job interviewing skills in a sample of persons with autism or other developmental disability. The study examined performance differences between baseline and final interviews in face-to-face and virtual reality conditions, and whether statistically significant increases were demonstrated between interviewing conditions. Paired samples t-tests were utilized to examine mean changes in performance by interview stage and in the overall difference between baseline and final interview stages. The preliminary results indicated that VITA is a positive factor when preparing young adults with autism or other developmental disability for employment interviews. Statistically significant results were demonstrated across all pilot conditions and in all but one post-assessment condition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Chandrashekhar, Nithin; Shapiro, Ari; Morency, Louis-Philippe; Scherer, Stefan
Manipulating the Perception of Virtual Audiences using Crowdsourced Behaviors Proceedings Article
In: Proceedings of the IVA 2016 : Intelligent Virtual Agents Conference, Springer, Los Angeles, CA, 2016.
@inproceedings{chollet_manipulating_2016,
title = {Manipulating the Perception of Virtual Audiences using Crowdsourced Behaviors},
author = {Mathieu Chollet and Nithin Chandrashekhar and Ari Shapiro and Louis-Philippe Morency and Stefan Scherer},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110162.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the IVA 2016: Intelligent Virtual Agents Conference},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {Virtual audiences are used for training public speaking and mitigating anxiety related to it. However, research has been scarce on studying how virtual audiences are perceived and which non-verbal behaviors should be used to make such an audience appear in particular states, such as boredom or engagement. Recently, crowdsourcing methods have been proposed for collecting data for building virtual agents' behavior models. In this paper, we use crowdsourcing for creating and evaluating a nonverbal behaviors generation model for virtual audiences. We show that our model successfully expresses relevant audience states (i.e. low to high arousal, negative to positive valence), and that the overall impression exhibited by the virtual audience can be controlled by manipulating the amount of individual audience members that display a congruent state.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
An Unsupervised Approach to Glottal Inverse Filtering Proceedings Article
In: Proceedings of the 2016 24th European Signal Processing Conference (EUSIPCO), Budapest, Hungary, 2016.
@inproceedings{ghosh_unsupervised_2016,
title = {An Unsupervised Approach to Glottal Inverse Filtering},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2016/papers/1570252319.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 24th European Signal Processing Conference (EUSIPCO)},
address = {Budapest, Hungary},
abstract = {The extraction of the glottal volume velocity waveform from voiced speech is a well-known example of a sparse signal recovery problem. Prior approaches have mostly used well-engineered speech processing or convex L1-optimization methods to solve the inverse filtering problem. In this paper, we describe a novel approach to modeling the human vocal tract using an unsupervised dictionary learning framework. We make the assumption of an all-pole model of the vocal tract, and derive an L1 regularized least squares loss function for the all-pole approximation. To evaluate the quality of the extracted glottal volume velocity waveform, we conduct experiments on real-life speech datasets, which include vowels and multi-speaker phonetically balanced utterances. We find that the unsupervised model learns meaningful dictionaries of vocal tracts, and the proposed data-driven unsupervised framework achieves a performance comparable to the IAIF (Iterative Adaptive Inverse Filtering) glottal flow extraction approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Representation Learning for Speech Emotion Recognition Journal Article
In: Interspeech 2016, pp. 3603–3607, 2016.
@article{ghosh_representation_2016,
title = {Representation Learning for Speech Emotion Recognition},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {https://www.researchgate.net/publication/307889274_Representation_Learning_for_Speech_Emotion_Recognition},
doi = {10.21437/Interspeech.2016-692},
year = {2016},
date = {2016-09-01},
journal = {Interspeech 2016},
pages = {3603--3607},
abstract = {Speech emotion recognition is an important problem with applications as varied as human-computer interfaces and affective computing. Previous approaches to emotion recognition have mostly focused on extraction of carefully engineered features and have trained simple classifiers for the emotion task. There has been limited effort at representation learning for affect recognition, where features are learnt directly from the signal waveform or spectrum. Prior work also does not investigate the effect of transfer learning from affective attributes such as valence and activation to categorical emotions. In this paper, we investigate emotion recognition from spectrogram features extracted from the speech and glottal flow signals; spectrogram encoding is performed by a stacked autoencoder and an RNN (Recurrent Neural Network) is used for classification of four primary emotions. We perform two experiments to improve RNN training : (1) Representation Learning - Model training on the glottal flow signal to investigate the effect of speaker and phonetic invariant features on classification performance (2) Transfer Learning - RNN training on valence and activation, which is adapted to a four emotion classification task. On the USC-IEMOCAP dataset, our proposed approach achieves a performance comparable to the state of the art speech emotion recognition systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ahn, Emily; Morbini, Fabrizio; Gordon, Andrew S.
Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing Proceedings Article
In: Proceedings of the 9th International Natural Language Generation Conference (INLG-2016), Edinburgh, UK, 2016.
@inproceedings{ahn_improving_2016,
  author    = {Emily Ahn and Fabrizio Morbini and Andrew S. Gordon},
  title     = {Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing},
  booktitle = {Proceedings of the 9th International Natural Language Generation Conference (INLG-2016)},
  address   = {Edinburgh, UK},
  year      = {2016},
  date      = {2016-09-01},
  url       = {https://www.researchgate.net/publication/307512031_Improving_Fluency_in_Narrative_Text_Generation_With_Grammatical_Transformations_and_Probabilistic_Parsing},
  abstract  = {In research on automatic generation of narrative text, story events are often formally represented as a causal graph. When serializing and realizing this causal graph as natural language text, simple approaches produce cumbersome sentences with repetitive syntactic structure, e.g. long chains of “because” clauses. In our research, we show that the fluency of narrative text generated from causal graphs can be improved by applying rule-based grammatical transformations to generate many sentence variations with equivalent semantics, then selecting the variation that has the highest probability using a probabilistic syntactic parser. We evaluate our approach by generating narrative text from causal graphs that encode 100 brief stories involving the same three characters, based on a classic film of experimental social psychology. Crowdsourced workers judged the writing quality of texts generated with ranked transformations as significantly higher than those without, and not significantly lower than human-authored narratives of the same situations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Carla; Tin, Jessica; Brown, Jeremy; Fritzsch, Elisabeth; Gabber, Shirley
Wochat Chatbot User Experience Summary Proceedings Article
In: Proceedings of the 2016 IVA: WOCHAT Workshop, Zerotype, Los Angeles, CA, 2016.
@inproceedings{gordon_wochat_2016,
title = {{WOCHAT} Chatbot User Experience Summary},
author = {Carla Gordon and Jessica Tin and Jeremy Brown and Elisabeth Fritzsch and Shirley Gabber},
url = {http://workshop.colips.org/wochat/documents/ST-281.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 IVA: WOCHAT Workshop},
publisher = {Zerotype},
address = {Los Angeles, CA},
abstract = {A team of 5 interns at the USC Institute for Creative Technologies interacted with 5 of the 6 chatbots; IRIS, Sammy, Sarah, TickTock and Joker. Unfortunately no one in our team could get the 6th chatbot, pyEliza, working. We found that there were certainly some chatbots that were better than others, and some of us were surprised by how distinct each bot felt from the others. One member commented on how they felt as though each different chatbot had an individual “voice” so to speak. Others were surprised by just how much of a “personality” the bots seemed to have. Most members of our team cited IRIS as their favorite, in terms of being capable of producing naturalistic conversation, with Sammy taking a close second. However, only one member of the team was able to interact with Sarah and TickTock, but that member cited TickTock as a capable conversation partner, and Sarah as being the best bot on a number of measures including appropriateness of responses and overall conversation cohesiveness. Therefore, perhaps if more members had been able to interact with Sarah and TickTock they may have ranked higher. Lastly, Joker was by far our least favorite, with whom no member of our team was able to have anything resembling a naturalistic or even cohesive conversation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; Qu, Cheng; Schlangen, David; DeVault, David
Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 252–262, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{manuvinakurike_toward_2016,
title = {Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems},
author = {Ramesh Manuvinakurike and Maike Paetzel and Cheng Qu and David Schlangen and David DeVault},
url = {http://www.aclweb.org/anthology/W16-3632},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {252--262},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {In this paper, we present and evaluate an approach to incremental dialogue act (DA) segmentation and classification. Our approach utilizes prosodic, lexico-syntactic and contextual features, and achieves an encouraging level of performance in offline corpus-based evaluation as well as in simulated human-agent dialogues. Our approach uses a pipeline of sequential processing steps, and we investigate the contribution of different processing steps to DA segmentation errors. We present our results using both existing and new metrics for DA segmentation. The incremental DA segmentation capability described here may help future systems to allow more natural speech from users and enable more natural patterns of interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Kennington, Casey; DeVault, David; Schlangen, David
Real-Time Understanding of Complex Discriminative Scene Descriptions Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 232–241, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{manuvinakurike_real-time_2016,
title = {Real-Time Understanding of Complex Discriminative Scene Descriptions},
author = {Ramesh Manuvinakurike and Casey Kennington and David DeVault and David Schlangen},
url = {http://www.aclweb.org/anthology/W16-3630},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {232--241},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {Real-world scenes typically have complex structure, and utterances about them consequently do as well. We devise and evaluate a model that processes descriptions of complex configurations of geometric shapes and can identify the described scenes among a set of candidates, including similar distractors. The model works with raw images of scenes, and by design can work word-by-word incrementally. Hence, it can be used in highly-responsive interactive and situated settings. Using a corpus of descriptions from game-play between human subjects (who found this to be a challenging task), we show that reconstruction of description structure in our system contributes to task success and supports the performance of the word-based model of grounded semantics that we use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Konovalov, Vasily; Melamud, Oren; Artstein, Ron; Dagan, Ido
Collecting Better Training Data using Biased Agent Policies in Negotiation Dialogues Proceedings Article
In: Proceedings of WOCHAT, the Second Workshop on Chatbots and Conversational Agent Technologies, Zerotype, Los Angeles, 2016.
@inproceedings{konovalov_collecting_2016,
title = {Collecting Better Training Data using Biased Agent Policies in Negotiation Dialogues},
author = {Vasily Konovalov and Oren Melamud and Ron Artstein and Ido Dagan},
url = {http://workshop.colips.org/wochat/documents/RP-270.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of WOCHAT, the Second Workshop on Chatbots and Conversational Agent Technologies},
publisher = {Zerotype},
address = {Los Angeles, CA},
abstract = {When naturally occurring data is characterized by a highly skewed class distribution, supervised learning often benefits from reducing this skew. Human-agent dialogue data is commonly highly skewed when using standard agent policies. Hence, we suggest that agent policies need to be reconsidered in the context of training data collection. Specifically, in this work we implemented biased agent policies that are optimized for data collection in the negotiation domain. Empirical evaluations show that our method is successful in collecting a reasonably balanced corpus in the highly skewed Job-Candidate domain. Furthermore, using this balanced corpus to train a negotiation intent classifier yields notable performance improvements relative to naturally distributed data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; DeVault, David; Lucas, Gale
The Benefits of Virtual Humans for Teaching Negotiation Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA), 2016, Springer, Los Angeles, CA, 2016.
@inproceedings{gratch_benefits_2016,
title = {The Benefits of Virtual Humans for Teaching Negotiation},
author = {Jonathan Gratch and David DeVault and Gale Lucas},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110276.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA)},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {This article examines the potential for teaching negotiation with virtual humans. Many people find negotiations to be aversive. We conjecture that students may be more comfortable practicing negotiation skills with an agent than with another person. We test this using the Conflict Resolution Agent, a semi-automated virtual human that negotiates with people via natural language. In a between-participants design, we independently manipulated two pedagogically-relevant factors while participants engaged in repeated negotiations with the agent: perceived agency (participants either believed they were negotiating with a computer program or another person) and pedagogical feedback (participants received instructional advice or no advice between negotiations). Findings indicate that novice negotiators were more comfortable negotiating with a computer program (they self-reported more comfort and punished their opponent less often) and expended more effort on the exercise following instructional feedback (both in time spent and in self-reported effort). These findings lend support to the notion of using virtual humans to teach interpersonal skills.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, AA; Lucas, G; Gratch, J; Stratou, G; Morency, L-P; Shilling, R; Hartholt, A; Scherer, S
Clinical interviewing by a virtual human agent with automatic behavior analysis Proceedings Article
In: Proceedings of the 2016 International Conference on Disability, Virtual Reality and Associated Technologies, pp. 57–64, ICDVRAT and the University of Reading, Los Angeles, CA, 2016, ISBN: 978-0-7049-1547-3.
@inproceedings{rizzo_clinical_2016,
title = {Clinical interviewing by a virtual human agent with automatic behavior analysis},
author = {AA Rizzo and G Lucas and J Gratch and G Stratou and L-P Morency and R Shilling and A Hartholt and S Scherer},
url = {http://centaur.reading.ac.uk/66645/8/ICDVRAT2016_Full_Proceedings_11th%20_Conf.pdf},
isbn = {978-0-7049-1547-3},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 International Conference on Disability, Virtual Reality and Associated Technologies},
pages = {57--64},
publisher = {ICDVRAT and the University of Reading},
address = {Los Angeles, CA},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. As well, previous research with SimSensei indicates that users engaging with this automated system, have less fear of evaluation and self-disclose more personal information compared to when they believe the VH agent is actually an avatar being operated by a “wizard of oz” human-in-the-loop (Lucas et al., 2014). The current study presents results from a sample of military service members (SMs) who were interviewed within the SimSensei system before and after a deployment to Afghanistan. Results indicate that SMs reveal more PTSD symptoms to the SimSensei VH agent than they self-report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and fewer happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hiraoka, Takuya; Georgila, Kallirroi; Nouri, Elnaz; Traum, David; Nakamura, Satoshi
Reinforcement Learning of Multi-Party Trading Dialog Policies Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 31, 2016, ISSN: 1346-8030.
@article{hiraoka_reinforcement_2016,
  author    = {Takuya Hiraoka and Kallirroi Georgila and Elnaz Nouri and David Traum and Satoshi Nakamura},
  title     = {Reinforcement Learning of Multi-Party Trading Dialog Policies},
  journal   = {Transactions of the Japanese Society for Artificial Intelligence},
  volume    = {31},
  year      = {2016},
  date      = {2016-09-01},
  issn      = {1346-8030},
  url       = {https://www.jstage.jst.go.jp/article/tjsai/31/4/31_B-FC1/_pdf},
  abstract  = {Trading dialogs are a kind of negotiation in which an exchange of ownership of items is discussed, and these kinds of dialogs are pervasive in many situations. Recently, there has been an increasing amount of research on applying reinforcement learning (RL) to negotiation dialog domains. However, in previous research, the focus was on negotiation dialog between two participants only, ignoring cases where negotiation takes place between more than two interlocutors. In this paper, as a first study on multi-party negotiation, we apply RL to a multi-party trading scenario where the dialog system (learner) trades with one, two, or three other agents. We experiment with different RL algorithms and reward functions. We use Q-learning with linear function approximation, least-squares policy iteration, and neural fitted Q iteration. In addition, to make the learning process more efficient, we introduce an incremental reward function. The negotiation strategy of the learner is learned through simulated dialog with trader simulators. In our experiments, we evaluate how the performance of the learner varies depending on the RL algorithm used and the number of traders. Furthermore, we compare the learned dialog policies with two strong hand-crafted baseline dialog policies. Our results show that (1) even in simple multi-party trading dialog tasks, learning an effective negotiation policy is not a straightforward task and requires a lot of experimentation; and (2) the use of neural fitted Q iteration combined with an incremental reward function produces negotiation policies as effective or even better than the policies of the two strong hand-crafted baselines.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Marge, Matthew; Bonial, Claire; Pollard, Kimberly A.; Artstein, Ron; Byrne, Brendan; Hill, Susan G.; Voss, Clare; Traum, David
Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards Proceedings Article
In: Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016), Springer, Los Angeles, CA, 2016.
@inproceedings{marge_assessing_2016,
title = {Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards},
author = {Matthew Marge and Claire Bonial and Kimberly A. Pollard and Ron Artstein and Brendan Byrne and Susan G. Hill and Clare Voss and David Traum},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110460.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016)},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {The Wizard-of-Oz (WOz) method is a common experimental technique in virtual agent and human-robot dialogue research for eliciting natural communicative behavior from human partners when full autonomy is not yet possible. For the first phase of our research reported here, wizards play the role of dialogue manager, acting as a robot’s dialogue processing. We describe a novel step within WOz methodology that incorporates two wizards and control sessions: the wizards function much like corpus annotators, being asked to make independent judgments on how the robot should respond when receiving the same verbal commands in separate trials. We show that inter-wizard discussion after the control sessions and the resolution with a reconciled protocol for the follow-on pilot sessions successfully impacts wizard behaviors and significantly aligns their strategies. We conclude that, without control sessions, we would have been unlikely to achieve both the natural diversity of expression that comes with multiple wizards and a better protocol for modeling an automated system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mizukami, Masahiro; Yoshino, Koichiro; Neubig, Graham; Traum, David; Nakamura, Satoshi
Analyzing the Effect of Entrainment on Dialogue Acts Proceedings Article
In: Proceedings of the SIGDIAL 2016 Conference, pp. 310–318, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{mizukami_analyzing_2016,
title = {Analyzing the Effect of Entrainment on Dialogue Acts},
author = {Masahiro Mizukami and Koichiro Yoshino and Graham Neubig and David Traum and Satoshi Nakamura},
url = {http://www.sigdial.org/workshops/conference17/proceedings/pdf/SIGDIAL40.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the SIGDIAL 2016 Conference},
pages = {310--318},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {Entrainment is a factor in dialogue that affects not only human-human but also human-machine interaction. While entrainment on the lexical level is well documented, less is known about how entrainment affects dialogue on a more abstract, structural level. In this paper, we investigate the effect of entrainment on dialogue acts and on lexical choice given dialogue acts, as well as how entrainment changes during a dialogue. We also define a novel measure of entrainment to measure these various types of entrainment. These results may serve as guidelines for dialogue systems that would like to entrain with users in a similar manner.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Krämer, Nicole C.; Karacora, Bilge; Lucas, Gale; Dehghani, Morteza; Rüther, Gina; Gratch, Jonathan
Closing the gender gap in STEM with friendly male instructors? On the effects of rapport behavior and gender of a virtual agent in an instructional interaction Journal Article
In: Computers & Education, vol. 99, pp. 1–13, 2016, ISSN: 03601315.
@article{kramer_closing_2016,
title = {Closing the gender gap in {STEM} with friendly male instructors? On the effects of rapport behavior and gender of a virtual agent in an instructional interaction},
author = {Nicole C. Krämer and Bilge Karacora and Gale Lucas and Morteza Dehghani and Gina Rüther and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0360131516300835},
doi = {10.1016/j.compedu.2016.04.002},
issn = {0360-1315},
year = {2016},
date = {2016-08-01},
journal = {Computers & Education},
volume = {99},
pages = {1--13},
abstract = {While numerous research endeavors address the effects of pedagogical agents, the role of the agent’s gender and its rapport behavior has been neglected. We hypothesize that a minimal amount of behavioral realism induced by display of rapport is necessary for any social effects to occur in human-computer interaction. Further, in line with results from STEM research on female role models, we assume that especially for female learners a same sex agent will be beneficial. In a 2(student gender)x2(agent gender)x2(rapport behavior yes/no) between subjects design, we investigate whether virtual agents can help enhance participants’ performance, effort and motivation in mathematics. Female and male participants (N = 128) interacted with a male or female virtual agent that either displayed rapport or no rapport. Our results confirm the expected main effect of rapport. However, against expectations, our results do not support the assumption that a same sex agent is beneficial for female learners. Participants’ performance and effort were significantly enhanced when interacting with an agent of opposite gender that displayed rapport. Our results have implications on designing agents for education and training purposes.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification Journal Article
In: Journal of Artificial General Intelligence, 2016, ISSN: 1946-0163.
@article{rosenbloom_sigma_2016,
  title     = {The {Sigma} Cognitive Architecture and System: Towards Functionally Elegant Grand Unification},
  author    = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
  url       = {http://www.degruyter.com/view/j/jagi.ahead-of-print/jagi-2016-0001/jagi-2016-0001.xml},
  doi       = {10.1515/jagi-2016-0001},
  issn      = {1946-0163},
  year      = {2016},
  date      = {2016-07-01},
  journal   = {Journal of Artificial General Intelligence},
  abstract  = {Sigma (Σ) is a cognitive architecture and system whose development is driven by a combination of four desiderata: grand unification, generic cognition, functional elegance, and sufficient efficiency. Work towards these desiderata is guided by the graphical architecture hypothesis, that key to progress on them is combining what has been learned from over three decades’ worth of separate work on cognitive architectures and graphical models. In this article, these four desiderata are motivated and explained, and then combined with the graphical architecture hypothesis to yield a rationale for the development of Sigma. The current state of the cognitive architecture is then introduced in detail, along with the graphical architecture that sits below it and implements it. Progress in extending Sigma beyond these architectures and towards a full cognitive system is then detailed in terms of both a systematic set of higher level cognitive idioms that have been developed and several virtual humans that are built from combinations of these idioms. Sigma as a whole is then analyzed in terms of how well the progress to date satisfies the desiderata. This article thus provides the first full motivation, presentation and analysis of Sigma, along with a diversity of more specific results that have been generated during its development.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Nazari, Zahra; Gratch, Jonathan
Predictive Models of Malicious Behavior in Human Negotiations Proceedings Article
In: Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, pp. 855–861, 2016.
@inproceedings{nazari_predictive_2016,
  title     = {Predictive Models of Malicious Behavior in Human Negotiations},
  author    = {Zahra Nazari and Jonathan Gratch},
  url       = {http://www.ijcai.org/Proceedings/16/Papers/126.pdf},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence},
  pages     = {855--861},
  abstract  = {Human and artificial negotiators must exchange information to find efficient negotiated agreements, but malicious actors could use deception to gain unfair advantage. The misrepresentation game is a game-theoretic formulation of how deceptive actors could gain disproportionate rewards while seeming honest and fair. Previous research proposed a solution to this game but this required restrictive assumptions that might render it inapplicable to real-world settings. Here we evaluate the formalism against a large corpus of human face-to-face negotiations. We confirm that the model captures how dishonest human negotiators win while seeming fair, even in unstructured negotiations. We also show that deceptive negotiators give-off signals of their malicious behavior, providing the opportunity for algorithms to detect and defeat this malicious tactic.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Rethinking Sigma’s Graphical Architecture: An Extension to Neural Networks Proceedings Article
In: International Conference on Artificial General Intelligence, pp. 84–94, Springer, New York, NY, 2016, ISBN: 978-3-319-41649-6.
@inproceedings{rosenbloom_rethinking_2016,
  title     = {Rethinking {Sigma’s} Graphical Architecture: An Extension to Neural Networks},
  author    = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
  url       = {http://link.springer.com/chapter/10.1007/978-3-319-41649-6_9},
  doi       = {10.1007/978-3-319-41649-6_9},
  isbn      = {978-3-319-41649-6},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {International Conference on Artificial General Intelligence},
  volume    = {9782},
  pages     = {84--94},
  publisher = {Springer},
  address   = {New York, NY},
  abstract  = {The status of Sigma’s grounding in graphical models is challenged by the ways in which their semantics has been violated while incorporating rule-based reasoning into them. This has led to a rethinking of what goes on in its graphical architecture, with results that include a straightforward extension to feedforward neural networks (although not yet with learning).},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul
Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture Book Section
In: Integrating Cognitive Architectures into Virtual Character Design, pp. 213 – 237, IGI Global, Hershey, PA, 2016, ISBN: 978-1-5225-0454-2.
@incollection{ustun_towards_2016,
  title     = {Towards Truly Autonomous Synthetic Characters with the {Sigma} Cognitive Architecture},
  author    = {Volkan Ustun and Paul Rosenbloom},
  url       = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-0454-2},
  isbn      = {978-1-5225-0454-2},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Integrating Cognitive Architectures into Virtual Character Design},
  pages     = {213--237},
  publisher = {IGI Global},
  address   = {Hershey, PA},
  abstract  = {Realism is required not only for how synthetic characters look but also for how they behave. Many applications, such as simulations, virtual worlds, and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Sigma (Σ) is being built as a computational model of general intelligence with a long-term goal of understanding and replicating the architecture of the mind; i.e., the fixed structure underlying intelligent behavior. Sigma leverages probabilistic graphical models towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of non-modular behavioral models. These ambitions strive for the complete control of synthetic characters that behave as humanly as possible. In this paper, Sigma is introduced along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Filter
2009
Gratch, Jonathan; DeJong, Gerald
A Decision-theoretic Approach to Adaptive Problem Solving Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, Guimarães, Portugal, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_decision-theoretic_2009,
title = {A Decision-theoretic Approach to Adaptive Problem Solving},
author = {Jonathan Gratch and Gerald DeJong},
url = {http://ict.usc.edu/pubs/A%20Decision-theoretic%20Approach%20to%20Adaptive%20Problem%20Solving.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
address = {Guimarães, Portugal},
abstract = {Computer aided interactive narrative has received increasing attention in recent years. Automated directorial control that manages the development of the story in the face of user interaction is an important aspect of interactive narrative design. Most existing approaches lack an explicit model of the user. This limits the approaches' ability of predicting the user's experience, and hence undermines the effectiveness of the approaches. Thespian is a multi-agent framework for authoring and simulating interactive narratives with explicit models of the user. This work extends Thespian with the ability to provide proactive directorial control using the user model. In this paper, we present the algorithms in detail, followed by examples.},
internal-note = {NOTE(review): the abstract describes the Thespian interactive-narrative system and does not match this title/author pair; the booktitle/year may also be misattributed -- verify against the linked PDF},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Gratch, Jonathan; Leuski, Anton; Morency, Louis-Philippe; Marsella, Stacy C.; Liewer, Matt; Doraiswamy, Prathibha; Weiss, Lori; LeMasters, Kim; Fast, Edward; Sadek, Ramy; Marshall, Andrew; Lee, Jina; Thiebaux, Marcus; Tsiartas, Andreas
At the Virtual Frontier: Introducing Gunslinger, a Multi- Character, Mixed-Reality, Story-Driven Experience Proceedings Article
In: Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), Amsterdam, The Netherlands, 2009.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hartholt_at_2009,
  author    = {Arno Hartholt and Jonathan Gratch and Anton Leuski and Louis-Philippe Morency and Stacy C. Marsella and Matt Liewer and Prathibha Doraiswamy and Lori Weiss and Kim LeMasters and Edward Fast and Ramy Sadek and Andrew Marshall and Jina Lee and Marcus Thiebaux and Andreas Tsiartas},
  title     = {At the Virtual Frontier: Introducing Gunslinger, a Multi- Character, Mixed-Reality, Story-Driven Experience},
  url       = {http://ict.usc.edu/pubs/At%20the%20Virtual%20Frontier-%20Introducing%20Gunslinger%20a%20Multi-%20Character%20Mixed-Reality%20Story-Driven%20Experience.pdf},
  year      = {2009},
  date      = {2009-01-01},
  booktitle = {Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
  address   = {Amsterdam, The Netherlands},
  abstract  = {We describe an application of intelligent virtual agents to the domain of mixed-reality interactive entertainment. Gunslinger allows users to interact with life-sized virtual humans within the context of a wild west story world. The application incorporates a novel integration of capabilities including gesture and spoken language recognition, story and dialogue reasoning, and multi-character, multi-modal behavior generation and synthesis. The article describes our design process, technological innovations, and initial feedback from user interactions with the system.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Difede, JoAnn; Rothbaum, Barbara O.; Johnston, Scott; McLay, Robert N.; Reger, Greg; Gahm, Greg; Parsons, Thomas D.; Graap, Ken; Pair, Jarrell
VR PTSD Exposure Therapy Results with Active Duty OIF/OEF Combatants Journal Article
In: Medicine Meets Virtual Reality, vol. 17, 2009.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_vr_2009,
  author    = {Albert Rizzo and JoAnn Difede and Barbara O. Rothbaum and Scott Johnston and Robert N. McLay and Greg Reger and Greg Gahm and Thomas D. Parsons and Ken Graap and Jarrell Pair},
  title     = {VR PTSD Exposure Therapy Results with Active Duty OIF/OEF Combatants},
  url       = {http://ict.usc.edu/pubs/VR%20PTSD%20Exposure%20Therapy%20Results%20with%20Active%20Duty%20OIF%20OEF%20Combatants.pdf},
  year      = {2009},
  date      = {2009-01-01},
  journal   = {Medicine Meets Virtual Reality},
  volume    = {17},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Reports indicate that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality exposure therapy has been previously used for PTSD with reports of positive outcomes. This paper will present a brief description of the USC/ICT Virtual Iraq/Afghanistan PTSD therapy application and present clinical outcome data from active duty patients treated at the Naval Medical Center-San Diego (NMCSD) as of October 2009. Initial outcomes from the first twenty patients to complete treatment indicate that 16 no longer meet diagnostic criteria for PTSD at post treatment. Research and clinical tests using the Virtual Iraq/Afghanistan software are also currently underway at Weill Cornell Medical College, Emory University, Fort Lewis and WRAMC along with 20 other test sites.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Morie, Jacquelyn; Antonisse, Jamie; Bouchard, Sean; Chance, Eric
Virtual Worlds as a Healing Modality for Returning Soldiers and Veterans Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine; Studies in Health Technology and Informatics, IOS Press, 2009.
Abstract | Links | BibTeX | Tags: Virtual Humans, Virtual Worlds
@inproceedings{morie_virtual_2009,
  author    = {Jacquelyn Morie and Jamie Antonisse and Sean Bouchard and Eric Chance},
  title     = {Virtual Worlds as a Healing Modality for Returning Soldiers and Veterans},
  url       = {http://ict.usc.edu/pubs/Virtual%20Worlds%20as%20a%20Healing%20Modality%20for%20Returning%20Soldiers%20and%20Veterans.pdf},
  year      = {2009},
  date      = {2009-01-01},
  booktitle = {Annual Review of CyberTherapy and Telemedicine; Studies in Health Technology and Informatics},
  volume    = {144},
  publisher = {IOS Press},
  abstract  = {Those who have served in recent conflicts face many challenges as they reintegrate into society. In addition to recovering from physical wounds, traumatic brain injury and post-traumatic stress disorders, many soldiers also face basic psychological issues about who they are and how to find their place in a society that has not shared their experiences. To address these challenges, we have created a space that provides ongoing opportunities for healing activities, personal exploration and social camaraderie in an online virtual world, Second Life. In such worlds, where each avatar is controlled by a live individual, experiences can be unintuitive, uninviting, considered boring or difficult to control. To counter this, we are implementing autonomous intelligent agent avatars that can be "on duty" 24/7, serving as guides and information repositories, making the space and activities easy to find and even personalized to the visitor's needs. We report the results of usability testing with an in-world veterans' group. Tests comparing soldiers who use this space as part of their reintegration regimen compared to those who do not are being scheduled as part of the Army's Warriors in Transition program.},
  keywords  = {Virtual Humans, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2008
Leuski, Anton; Traum, David
A Statistical Approach for Text Processing in Virtual Humans Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_statistical_2008,
  title     = {A Statistical Approach for Text Processing in Virtual Humans},
  author    = {Anton Leuski and David Traum},
  url       = {http://ict.usc.edu/pubs/A%20STATISTICAL%20APPROACH%20FOR%20TEXT%20PROCESSING%20IN%20VIRTUAL%20HUMANS.pdf},
  year      = {2008},
  date      = {2008-12-01},
  booktitle = {Proceedings of the 26th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {We describe a text classification approach based on statistical language modeling. We show how this approach can be used for several natural language processing tasks in a virtual human system. Specifically, we show it can be applied to language understanding, language generation, and character response selection tasks. We illustrate these applications with some experimental results.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Parsons, Thomas D.; Pair, Jarrell; McLay, Robert N.; Johnston, Scott; Perlman, Karen; Deal, Robert; Reger, Greg; Gahm, Greg; Roy, Michael; Shilling, Russell; Rothbaum, Barbara O.; Graap, Ken; Spitalnick, Josh; Bordnick, Patrick; Difede, JoAnn
Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_clinical_2008,
  author    = {Albert Rizzo and Thomas D. Parsons and Jarrell Pair and Robert N. McLay and Scott Johnston and Karen Perlman and Robert Deal and Greg Reger and Greg Gahm and Michael Roy and Russell Shilling and Barbara O. Rothbaum and Ken Graap and Josh Spitalnick and Patrick Bordnick and JoAnn Difede},
  title     = {Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD},
  url       = {http://ict.usc.edu/pubs/Clinical%20Results%20from%20the%20Virtual%20Iraq%20Esposure%20Therapy%20Application%20for%20PTSD.pdf},
  year      = {2008},
  date      = {2008-12-01},
  booktitle = {Proceedings of the 26th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 5 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. The current paper will present the rationale and description of a VR PTSD therapy application (Virtual Iraq) and present initial findings from its use with active duty service members. Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Results from an open clinical trial using Virtual Iraq at the Naval Medical Center-San Diego with 20 treatment completers indicate that 16 no longer met PTSD diagnostic criteria at post-treatment, with only one not maintaining treatment gains at 3 month follow-up.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Artstein, Ron; Cannon, Jacob; Gandhe, Sudeep; Gerten, Jillian; Henderer, Joe; Leuski, Anton; Traum, David
Coherence of Off-Topic Response for a Virtual Character Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{artstein_coherence_2008,
  author    = {Ron Artstein and Jacob Cannon and Sudeep Gandhe and Jillian Gerten and Joe Henderer and Anton Leuski and David Traum},
  title     = {Coherence of Off-Topic Response for a Virtual Character},
  url       = {http://ict.usc.edu/pubs/COHERENCE%20OF%20OFF-TOPIC%20RESPONSES%20FOR%20A%20VIRTUAL%20CHARACTER.pdf},
  year      = {2008},
  date      = {2008-12-01},
  booktitle = {Proceedings of the 26th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {We demonstrate three classes of off-topic responses which allow a virtual question-answering character to handle cases where it does not understand the user's input: ask for clarification, indicate misunderstanding, and move on with the conversation. While falling short of full dialogue management, a combination of such responses together with prompts to change the topic can improve overall dialogue coherence.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morency, Louis-Philippe
Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morency_real-time_2008,
  title     = {Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model},
  author    = {Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/REAL-TIME%20HEAD%20POSE%20ESTIMATION%20USING%20A%20WEBCAM-%20MONOCULAR%20ADAPTIVE%20VIEW-BASED%20APPEARANCE%20MODEL.pdf},
  year      = {2008},
  date      = {2008-12-01},
  booktitle = {Proceedings of the 26th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Accurately estimating the person's head position and orientation is an important task for a wide range of applications such as driver awareness and human-robot interaction. Over the past two decades, many approaches have been suggested to solve this problem, each with its own advantages and disadvantages. In this paper, we present a probabilistic framework called Monocular Adaptive View-based Appearance Model (MAVAM) which integrates the advantages from two of these approaches: (1) the relative precision and user-independence of differential registration, and (2) the robustness and bounded drift of keyframe tracking. In our experiments, we show how the MAVAM model can be used to estimate head position and orientation in real-time using a simple monocular camera. Our experiments on two previously published datasets show that the MAVAM framework can accurately track for a long period of time ({\textgreater}2 minutes) with an average accuracy of 3.9 degrees and 1.2in with an inertial sensor and a 3D magnetic sensor.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Leuski, Anton; Roque, Antonio; Gandhe, Sudeep; DeVault, David; Gerten, Jillian; Robinson, Susan; Martinovski, Bilyana
Natural Language Dialogue Architectures for Tactical Questioning Characters Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_natural_2008,
  author    = {David Traum and Anton Leuski and Antonio Roque and Sudeep Gandhe and David DeVault and Jillian Gerten and Susan Robinson and Bilyana Martinovski},
  title     = {Natural Language Dialogue Architectures for Tactical Questioning Characters},
  url       = {http://ict.usc.edu/pubs/Natural%20Language%20Dialogue%20Architectures.pdf},
  year      = {2008},
  date      = {2008-12-01},
  booktitle = {Proceedings of the 26th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {In this paper we contrast three architectures for natural language questioning characters. We contrast the relative costs and benefits of each approach in building characters for tactical questioning. The first architecture works purely at the textual level, using cross-language information retrieval techniques to learn the best output for any input from a training set of linked questions and answers. The second architecture adds a global emotional model and computes a compliance model, which can result in different outputs for different levels, given the same inputs. The third architecture works at a semantic level and allows authoring of different policies for response for different kinds of information. We describe these architectures and their strengths and weaknesses with respect to expressive capacity, performance, and authoring demands.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Gratch, Jonathan; Hartholt, Arno; Marsella, Stacy C.; Lee, Jina
Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Virtual Agents, pp. 117–130, Tokyo, Japan, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_multi-party_2008,
  title     = {Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents},
  author    = {David Traum and Jonathan Gratch and Arno Hartholt and Stacy C. Marsella and Jina Lee},
  url       = {http://ict.usc.edu/pubs/Multi-party,%20Multi-issue,%20Multi-strategy%20Negotiation.pdf},
  year      = {2008},
  date      = {2008-09-01},
  booktitle = {Proceedings of the 8th International Conference on Intelligent Virtual Agents},
  pages     = {117--130},
  address   = {Tokyo, Japan},
  abstract  = {We present a model of negotiation for virtual agents that extends previous work to be more human-like and applicable to a broader range of situations, including more than two negotiators with different goals, and negotiating over multiple options. The agents can dynamically change their negotiating strategies based on the current values of several parameters and factors that can be updated in the course of the negotiation. We have implemented this model and done preliminary evaluation within a prototype training system and a three-party negotiation with two virtual humans and one human.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Rizzo, Albert
Virtual Human Patients for Training of Clinical Interview and Communication Skills Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, Maia, Portugal, 2008, ISBN: 07 049 15 00 6.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{parsons_virtual_2008,
  title     = {Virtual Human Patients for Training of Clinical Interview and Communication Skills},
  author    = {Thomas D. Parsons and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Virtual%20Human%20Patients%20for%20Training%20of%20Clinical%20Interview%20and%20Communication%20Skills.pdf},
  isbn      = {07 049 15 00 6},
  year      = {2008},
  date      = {2008-09-01},
  booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology},
  address   = {Maia, Portugal},
  abstract  = {Although schools commonly make use of standardized patients to teach interview skills, the diversity of the scenarios standardized patients can characterize is limited by availability of human actors. Virtual Human Agent technology has evolved to a point where researchers may begin developing mental health applications that make use of virtual reality patients. The work presented here is a preliminary attempt at what we believe to be a large application area. Herein we describe an ongoing study of our virtual patients. We present an approach that allows novice mental health clinicians to conduct an interview with virtual character that emulates 1) an adolescent male with conduct disorder; and 2) an adolescent female who has recently been physically traumatized.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels Journal Article
In: Lecture Notes in Computer Science, vol. 5208, pp. 484–485, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{de_melo_evolving_2008,
  title     = {Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels},
  author    = {Celso M. de Melo and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20in%20Virtual%20Humans%20Using%20Lights%20and%20Pixels.pdf},
  year      = {2008},
  date      = {2008-09-01},
  journal   = {Lecture Notes in Computer Science},
  volume    = {5208},
  pages     = {484--485},
  abstract  = {Inspired by the arts, this paper addresses the challenge of expressing emotions in virtual humans using the environment's lights and the screen's pixels. An evolutionary approach is proposed which relies on genetic algorithms to learn how to map emotions into these forms of expression. The algorithm evolves populations of hypotheses, where each hypothesis represents a configuration of lighting and screen expression. Hypotheses are evaluated by a critic ensemble composed of artificial and human critics. The need for human critics is motivated by a study which reveals the limitations of an approach that relies only on artificial critics that follow principles from art literature. We also address the need for the model to improve with experience and to adapt to the individual, social and cultural values in the arts. Finally, a second study is described where subjects successfully evolved mappings for joy and sadness.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gandhe, Sudeep; DeVault, David; Roque, Antonio; Martinovski, Bilyana; Artstein, Ron; Leuski, Anton; Gerten, Jillian; Traum, David
From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters Proceedings Article
In: Proceedings of InterSpeech, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_domain_2008,
  title     = {From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters},
  author    = {Sudeep Gandhe and David DeVault and Antonio Roque and Bilyana Martinovski and Ron Artstein and Anton Leuski and Jillian Gerten and David Traum},
  url       = {http://ict.usc.edu/pubs/From%20Domain%20Specification%20to%20Virtual%20Humans.pdf},
  year      = {2008},
  date      = {2008-09-01},
  booktitle = {Proceedings of InterSpeech},
  abstract  = {We present a new approach for rapidly developing dialogue capabilities for virtual humans. Starting from domain specification, an integrated authoring interface automatically generates dialogue acts with all possible contents. These dialogue acts are linked to example utterances in order to provide training data for natural language understanding and generation. The virtual human dialogue system contains a dialogue manager following the information-state approach, using finite-state machines and SCXML to manage local coherence, as well as explicit modeling of emotions and compliance level and a grounding component based on evidence of understanding. Using the authoring tools, we design and implement a version of the virtual human Hassan and compare to previous architectures for the character.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Virtual Humans for Assisted Health Care Proceedings Article
In: Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings, ACM, Athens, Greece, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_virtual_2008-1,
  author    = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
  title     = {Virtual Humans for Assisted Health Care},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20Assisted%20Health%20Care.pdf},
  year      = {2008},
  date      = {2008-07-01},
  booktitle = {Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings},
  publisher = {ACM},
  address   = {Athens, Greece},
  abstract  = {There is a growing need for applications that can dynamically interact with aging populations to gather information, monitor their health care, provide information, or even act as companions. Virtual human agents or virtual characters offer a technology that can enable human users to overcome the confusing interfaces found in current human-computer interactions. These artificially intelligent virtual characters have speech recognition, natural language and vision that will allow human users to interact with their computers in a more natural way. Additionally, sensors may be used to monitor the environment for specific behaviors that can be fused into a virtual human system. As a result, the virtual human may respond to a patient or elderly person in a manner that will have a powerful affect on their living situation. This paper will describe the virtual human technology developed and some current applications that apply the technology to virtual patients for mental health diagnosis and clinician training. Additionally the paper will discuss possible ways in which the virtual humans may be utilized for assisted health care and for the integration of multi-modal input to enhance the virtual human system.},
  keywords  = {MedVR, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Solomon, Steve; Gratch, Jonathan; Bulitko, Vadim; Lent, Michael
Modeling Culturally and Emotionally Affected Behavior Proceedings Article
In: The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics., Osaka, Japan, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{solomon_modeling_2008,
title = {Modeling Culturally and Emotionally Affected Behavior},
author = {Steve Solomon and Jonathan Gratch and Vadim Bulitko and Michael Lent},
url = {http://www.ict.usc.edu/pubs/Modeling%20Culturally%20and%20Emotionally%20Affected%20Behavior.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics.},
address = {Osaka, Japan},
abstract = {Culture and emotions have a profound impact on human behavior. Consequently, high-fidelity simulated interactive environments (e.g., trainers and computer games) that involve virtual humans must model socio-cultural and emotional affects on agent behavior. In this paper we discuss two recently fielded systems that do so independently: Culturally Affected Behavior (CAB) and EMotion and Adaptation (EMA). We then propose a simple language that combines the two systems in a natural way thereby enabling simultaneous simulation of culturally and emotionally affected behavior. The proposed language is based on matrix algebra and can be easily implemented on single- or multi-core hardware with a standard matrix package (e.g., MATLAB or a C++ library). We then show how to extend the combined culture and emotion model with an explicit representation of religion and personality profiles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Blascovich, James J.; Chemers, Martin M.; Hunt, Earl; Ilgen, Daniel R.; Larsen, Randy L.; Mayer, Richard E.; O'Neil, Harold Jr.; McLaughlin, Alan J.; Patel, Vilma L.; Quiñones, Miguel A.; Simons, Anna
Human Behavior in Military Contexts Book
The National Academies Press, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@book{gratch_human_2008,
title = {Human Behavior in Military Contexts},
author = {Jonathan Gratch and James J. Blascovich and Martin M. Chemers and Earl Hunt and Daniel R. Ilgen and Randy L. Larsen and Richard E. Mayer and O'Neil, Jr., Harold and Alan J. McLaughlin and Vilma L. Patel and Miguel A. Quiñones and Anna Simons},
url = {http://www.ict.usc.edu/pubs/Human%20Behavior%20in%20Military%20Contexts.pdf},
year = {2008},
date = {2008-06-01},
publisher = {The National Academies Press},
abstract = {Human behavior forms the nucleus of military effectiveness. Humans operating in the complex military system must possess the knowledge, skills, abilities, aptitudes, and temperament to perform their roles effectively in a reliable and predictable manner, and effective military management requires understanding of how these qualities can be best provided and assessed. Scientific research in this area is critical to understanding leadership, training and other personnel issues, social interactions and organizational structures within the military. The U.S. Army Research Institute for the Behavioral and Social Sciences (ARI) asked the National Research Council to provide an agenda for basic behavioral and social research focused on applications in both the short and long-term. The committee responded by recommending six areas of research on the basis of their relevance, potential impact, and timeliness for military needs: intercultural competence; teams in complex environments; technology-based training; nonverbal behavior; emotion; and behavioral neurophysiology. The committee suggests doubling the current budget for basic research for the behavioral and social sciences across U.S. military research agencies. The additional funds can support approximately 40 new projects per year across the committee's recommended research areas. Human Behavior in Military Contexts includes committee reports and papers that demonstrate areas of stimulating, ongoing research in the behavioral and social sciences that can enrich the military's ability to recruit, train, and enhance the performance of its personnel, both organizationally and in its many roles in other cultures.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {book}
}
DeVault, David; Traum, David; Artstein, Ron
Making Grammar-Based Generation Easier to Deploy in Dialogue Systems Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, Columbus, OH, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{devault_making_2008,
title = {Making Grammar-Based Generation Easier to Deploy in Dialogue Systems},
author = {David DeVault and David Traum and Ron Artstein},
url = {http://ict.usc.edu/pubs/Making%20Grammar-Based%20Generation%20Easier%20to%20Deploy%20in%20Dialogue%20Systems%20.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
address = {Columbus, OH},
abstract = {We present a development pipeline and associated algorithms designed to make grammar-based generation easier to deploy in implemented dialogue systems. Our approach realizes a practical trade-off between the capabilities of a system's generation component and the authoring and maintenance burdens imposed on the generation content author for a deployed system. To evaluate our approach, we performed a human rating study with system builders who work on a common large-scale spoken dialogue system. Our results demonstrate the viability of our approach and illustrate authoring/performance trade-offs between hand-authored text, our grammar-based approach, and a competing shallow statistical NLG technique},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Traum, David; Artstein, Ron
Practical Grammar-Based NLG from Examples Proceedings Article
In: The Fifth International Natural Language Generation Conference (INLG 2008), Salt Fork, OH, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{devault_practical_2008,
title = {Practical Grammar-Based {NLG} from Examples},
author = {David DeVault and David Traum and Ron Artstein},
url = {http://ict.usc.edu/pubs/Practical%20Grammar-Based%20NLG%20from%20Examples%20.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {The Fifth International Natural Language Generation Conference (INLG 2008)},
address = {Salt Fork, OH},
abstract = {We present a technique that opens up grammar-based generation to a wider range of practical applications by dramatically reducing the development costs and linguistic expertise that are required. Our method infers the grammatical resources needed for generation from a set of declarative examples that link surface expressions directly to the application's available semantic representations. The same examples further serve to optimize a run-time search strategy that generates the best output that can be found within an application-specific time frame. Our method offers substantially lower development costs than hand-crafted grammars for application-specific NLG, while maintaining high output quality and diversity.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
An Evaluation Understudy for Dialogue Coherence Models Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_evaluation_2008,
title = {An Evaluation Understudy for Dialogue Coherence Models},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/An%20Evaluation%20Understudy%20for%20Dialogue%20Coherence%20Models.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
abstract = {Evaluating a dialogue system is seen as a major challenge within the dialogue research community. Due to the very nature of the task, most of the evaluation methods need a substantial amount of human involvement. Following the tradition in machine translation, summarization and discourse coherence modeling, we introduce the idea of evaluation understudy for dialogue coherence models. Following (Lapata, 2006), we use the information ordering task as a testbed for evaluating dialogue coherence models. This paper reports findings about the reliability of the information ordering task as applied to dialogues. We find that simple n-gram co-occurrence statistics similar in spirit to BLEU (Papineni et al., 2001) correlate very well with human judgments for dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
Degrees of Grounding Based on Evidence of Understanding Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, Columbus, OH, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_degrees_2008,
  title     = {Degrees of Grounding Based on Evidence of Understanding},
  author    = {Antonio Roque and David Traum},
  url       = {http://ict.usc.edu/pubs/Degrees%20of%20Grounding%20Based%20on%20Evidence%20of%20Understanding.pdf},
  year      = {2008},
  date      = {2008-06-01},
  booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
  address   = {Columbus, OH},
  abstract  = {We introduce the Degrees of Grounding model, which defines the extent to which material being discussed in a dialogue has been grounded. This model has been developed and evaluated by a corpus analysis, and includes a set of types of evidence of understanding, a set of degrees of groundedness, a set of grounding criteria, and methods for identifying each of these. We describe how this model can be used for dialogue management.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Pataki, Caroly; Pato, Michele; St. George, Cheryl; Sugar, Jeff; Rizzo, Albert
Virtual Justina: A PTSD Virtual Patient for Clinical Classroom Training Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, pp. 113–118, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2008-2,
title = {Virtual Justina: A {PTSD} Virtual Patient for Clinical Classroom Training},
author = {Patrick G. Kenny and Thomas D. Parsons and Caroly Pataki and Michele Pato and Cheryl {St. George} and Jeff Sugar and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Justina-%20A%20PTSD%20Virtual%20Patient%20for%20Clinical%20Classroom%20Training.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine},
volume = {6},
pages = {113--118},
abstract = {The effects of trauma exposure manifest themselves in a wide range of symptoms: anxiety, post-traumatic stress disorder, fear, and various behavior problems. Effective interview skills are a core competency for the clinicians who will be working with children and adolescents exposed to trauma. The current project aims to improve child and adolescent psychiatry residents, and medical students’ interviewing skills and diagnostic acumen through practice with a female adolescent virtual human with post-traumatic stress disorder. This interaction with a virtual patient provides a context where immediate feedback can be provided regarding trainees’ interviewing skills in terms of psychiatric knowledge, sensitivity, and effectiveness. Results suggest that a virtual standardized patient can generate responses that elicit user questions relevant for PTSD categorization. We conclude with a discussion of the ways in which these capabilities allow virtual patients to serve as unique training tools whose special knowledge and reactions can be continually fed back to trainees. Our initial goal is to focus on a virtual patient with PTSD, but a similar strategy could be applied to teaching a broad variety of psychiatric diagnoses to trainees at every level from medical students, to psychiatry residents, to child and adolescent psychiatry residents.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gandhe, Sudeep; Leuski, Anton; Traum, David
Field Testing of an Interactive Question-Answering Character Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{artstein_field_2008,
  title     = {Field Testing of an Interactive Question-Answering Character},
  author    = {Ron Artstein and Sudeep Gandhe and Anton Leuski and David Traum},
  url       = {http://ict.usc.edu/pubs/Field%20Testing%20of%20an%20Interactive%20Question-Answering%20Character%20.pdf},
  year      = {2008},
  date      = {2008-05-01},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Marrakech, Morocco},
  abstract  = {We tested a life-size embodied question-answering character at a convention where he responded to questions from the audience. The character's responses were then rated for coherence. The ratings, combined with speech transcripts, speech recognition results and the character's responses, allowed us to identify where the character needs to improve, namely in speech recognition and providing off-topic responses.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Russ, Thomas; Traum, David; Hovy, Eduard; Robinson, Susan
A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hartholt_common_2008,
  title     = {A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture},
  author    = {Arno Hartholt and Thomas Russ and David Traum and Eduard Hovy and Susan Robinson},
  url       = {http://ict.usc.edu/pubs/A%20Common%20Ground%20for%20Virtual%20Humans-%20Using%20an%20Ontology%20in%20a%20Natural%20Language%20Oriented%20Virtual%20Human%20Architecture.pdf},
  year      = {2008},
  date      = {2008-05-01},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Marrakech, Morocco},
  abstract  = {When dealing with large, distributed systems that use state-of-the-art components, individual components are usually developed in parallel. As development continues, the decoupling invariably leads to a mismatch between how these components internally represent concepts and how they communicate these representations to other components: representations can get out of synch, contain localized errors, or become manageable only by a small group of experts for each module. In this paper, we describe the use of an ontology as part of a complex distributed virtual human architecture in order to enable better communication between modules while improving the overall flexibility needed to change or extend the system. We focus on the natural language understanding capabilities of this architecture and the relationship between language and concepts within the entire system in general and the ontology in particular.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robinson, Susan; Traum, David; Ittycheriah, Midhun; Henderer, Joe
What would you ask a Conversational Agent? Observations of Human-Agent Dialogues in a Museum Setting Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_what_2008,
title = {What would you ask a Conversational Agent? Observations of Human-Agent Dialogues in a Museum Setting},
author = {Susan Robinson and David Traum and Midhun Ittycheriah and Joe Henderer},
url = {http://ict.usc.edu/pubs/What%20would%20you%20ask%20a%20conversational%20agent.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Marrakech, Morocco},
abstract = {Embodied Conversational Agents have typically been constructed for use in limited domain applications, and tested in very specialized environments. Only in recent years have there been more cases of moving agents into wider public applications (e.g. Bell et al., 2003; Kopp et al., 2005). Yet little analysis has been done to determine the differing needs, expectations, and behavior of human users in these environments. With an increasing trend for virtual characters to ``go public'', we need to expand our understanding of what this entails for the design and capabilities of our characters. This paper explores these issues through an analysis of a corpus that has been collected since December 2006, from interactions with the virtual character Sgt Blackwell at the Cooper Hewitt Museum in New York. The analysis includes 82 hierarchical categories of user utterances, as well as specific observations on user preferences and behaviors drawn from interactions with Blackwell.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Thiebaux, Marcus; Marshall, Andrew; Marsella, Stacy C.; Kallmann, Marcelo
SmartBody: Behavior Realization for Embodied Conversational Agents Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Estoril, Portugal, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{thiebaux_smartbody_2008,
title = {{SmartBody}: Behavior Realization for Embodied Conversational Agents},
author = {Marcus Thiebaux and Andrew Marshall and Stacy C. Marsella and Marcelo Kallmann},
url = {http://ict.usc.edu/pubs/SmartBody-%20Behavior%20Realization%20for%20Embodies%20Conversational%20Agents.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Estoril, Portugal},
abstract = {Researchers demand much from their embodied conversational agents (ECA), requiring them to be both life-like, as well as responsive to events in an interactive setting. We find that a flexible combination of animation approaches may be needed to satisfy these needs. In this paper we present SmartBody, an open source modular framework for animating ECAs in real time, based on the notion of hierarchically connected animation controllers. Controllers in SmartBody can employ arbitrary animation algorithms such as keyframe interpolation, motion capture or procedural animation. Controllers can also schedule or combine other controllers. We discuss our architecture in detail, including how we incorporate traditional approaches, and develop the notion of a controller as a reactive module within a generic framework, for realizing modular animation control. To illustrate the versatility of the architecture, we also discuss a range of applications that have used SmartBody successfully.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; DeVault, David; Marsella, Stacy C.; Traum, David
Thoughts on FML: Behavior Generation in the Virtual Human Communication Architecture Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) First Functional Markup Language Workshop, Estoril, Portugal, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{lee_thoughts_2008,
title = {Thoughts on {FML}: Behavior Generation in the Virtual Human Communication Architecture},
author = {Jina Lee and David DeVault and Stacy C. Marsella and David Traum},
url = {http://ict.usc.edu/pubs/Thoughts%20on%20FML.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) First Functional Markup Language Workshop},
address = {Estoril, Portugal},
abstract = {We discuss our current architecture for the generation of natural language and non-verbal behavior in ICT virtual humans. We draw on our experience developing this architecture to present our current perspective on several issues related to the standardization of FML and to the SAIBA framework more generally. In particular, we discuss our current use, and non-use, of FML-inspired representations in generating natural language, eye gaze, and emotional displays. We also comment on some of the shortcomings of our design as currently implemented.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents Journal Article
In: Lecture Notes in Computer Science, vol. 4930, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{gratch_true_2008-1,
  title     = {True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents},
  author    = {Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/True%20Emotion%20vs%20Social%20Intentions%20in%20Nonverbal%20Communication-%20Towards%20a%20Synthesis%20for%20Embodied%20Conversational%20Agents.pdf},
  year      = {2008},
  date      = {2008-01-01},
  journal   = {Lecture Notes in Computer Science},
  volume    = {4930},
  abstract  = {Does a facial expression convey privileged information about a person's mental state or is it a communicative act, divorced from "true" beliefs, desires and intentions? This question is often cast as a dichotomy between competing theoretical perspectives. Theorists like Ekman argue for the primacy of emotion as a determinant of nonverbal behavior: emotions "leak" and only indirectly serve social ends. In contrast, theorists such as Fridlund argue for the primacy of social ends in determining nonverbal displays. This dichotomy has worked to divide virtual character research. Whereas there have been advances in modeling emotion, this work is often seen as irrelevant to the generation of communicative behavior. In this chapter, I review current findings on the interpersonal function of emotion. I'll discuss recent developments in Social Appraisal theory as a way to bridge this dichotomy and our attempts to model these functions within the context of embodied conversational agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Kang, Sin-Hwa; Gratch, Jonathan; Wang, Ning; Watt, James H.
Agreeable People Like Agreeable Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 253–261, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kang_agreeable_2008,
title = {Agreeable People Like Agreeable Virtual Humans},
author = {Sin-Hwa Kang and Jonathan Gratch and Ning Wang and James H. Watt},
url = {http://ict.usc.edu/pubs/Agreeable%20People%20Like%20Agreeable%20Virtual%20Humans.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Lecture Notes in Computer Science},
pages = {253--261},
abstract = {This study explored associations between the five-factor personality traits of human subjects and their feelings of rapport when they interacted with a virtual agent or real humans. The agent, the Rapport Agent, responded to real human speakers' storytelling behavior, using only nonverbal contingent (i.e., timely) feedback. We further investigated how interactants' personalities were related to the three components of rapport: positivity, attentiveness, and coordination. The results revealed that more agreeable people showed strong self-reported rapport and weak behavioral measured rapport in the disfluency dimension when they interacted with the Rapport Agent, while showing no significant associations between agreeableness and self-reported rapport, nor between agreeableness and the disfluency dimension when they interacted with real humans. The conclusions provide fundamental data to further develop a rapport theory that would contribute to evaluating and enhancing the interactional fidelity of an agent on the design of virtual humans for social skills training and therapy.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
A Virtual Human Dialogue Model for Non-team Interaction Book Section
In: Recent Trends in Discourse and Dialogue, vol. 39, pp. 45–67, Springer, Dordecht, The Netherlands, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{traum_virtual_2008,
title = {A Virtual Human Dialogue Model for Non-team Interaction},
author = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Dialogue%20Model%20for%20Non-team%20Interaction.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Recent Trends in Discourse and Dialogue},
volume = {39},
pages = {45--67},
publisher = {Springer},
address = {Dordrecht, The Netherlands},
series = {Text, Speech and Language Technology},
abstract = {We describe the dialogue model for the virtual humans developed at the Institute for Creative Technologies at the University of Southern California. The dialogue model contains a rich set of information state and dialogue moves to allow a wide range of behaviour in multimodal, multiparty interaction. We extend this model to enable non-team negotiation, using ideas from social science literature on negotiation and implemented strategies and dialogue moves for this area. We present a virtual human doctor who uses this model to engage in multimodal negotiation dialogue with people from other organisations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Carre, David; Levasseur, Marco; Gratch, Jonathan; Jacopin, Eric
Multimodal Toolbox: Analyzing Gestures Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2008, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{carre_multimodal_2008,
title = {Multimodal Toolbox: Analyzing Gestures},
author = {David Carre and Marco Levasseur and Jonathan Gratch and Eric Jacopin},
url = {http://ict.usc.edu/pubs/ICT%20TR%2003%202008.pdf},
year = {2008},
date = {2008-01-01},
number = {ICT TR 03 2008},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Rapport between people and virtual human agents is not limited to just speech. There are many non-verbal behaviors such as gestures or facial expressions that can express feelings or convey a message. One of the challenges in making an agent appear more realistic is to make his non-verbal behaviors appear more natural. To accomplish this, it is essential to find out how and when gestures are performed. In order to determine how gestures are performed, it is necessary to assess different appearances of the same gesture and the mapping between their respective function. To determine when gestures are performed, the key is to find relevant contextual features and their links with gestures, which will lead to the prediction of the moment they should be performed. Finally, both of these issues can now be tackled with the provided toolbox. Preliminary results show that we have some gesture pattern. Besides, we were able, based on contextual features, to predict when the agent should nod his head. Early results appear to show the agent nods at an opportune time. Moreover, this toolbox generalizes the results to other kind of gestures than head nods, which is the goal of this study.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Rizzo, Albert; Graap, Ken; Perlman, Karen; McLay, Robert N.; Rothbaum, Barbara O.; Reger, Greg; Parsons, Thomas D.; Difede, JoAnn; Pair, Jarrell
Virtual Iraq: Initial Results from a VR Exposure Therapy Application for Combat-Related PTSD Journal Article
In: Medicine Meets Virtual Reality, vol. 16, pp. 420–425, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_virtual_2008,
title = {Virtual {Iraq}: Initial Results from a {VR} Exposure Therapy Application for Combat-Related {PTSD}},
author = {Albert Rizzo and Ken Graap and Karen Perlman and Robert N. McLay and Barbara O. Rothbaum and Greg Reger and Thomas D. Parsons and JoAnn Difede and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Virtual%20Iraq-%20Initial%20Results%20from%20a%20VR%20Exposure%20Therapy%20Application%20for%20Combat-Related%20PTSD.pdf},
year = {2008},
date = {2008-01-01},
journal = {Medicine Meets Virtual Reality},
volume = {16},
pages = {420--425},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale and brief description of a Virtual Iraq PTSD VR therapy application and present initial findings from its use with PTSD patients. Thus far, Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Ft. Lewis, Camp Pendleton, Emory University, Weill Cornell Medical College, Walter Reed Army Medical Center, San Diego Naval Medical Center and 12 other sites.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Evaluation of Justina: A Virtual Patient with PTSD Proceedings Article
In: Lecture Notes in Computer Science, pp. 394–408, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_evaluation_2008,
title = {Evaluation of {Justina}: A Virtual Patient with {PTSD}},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Justina-%20A%20Virtual%20Patient%20with%20PTSD.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Lecture Notes in Computer Science},
pages = {394--408},
abstract = {Recent research has established the potential for virtual characters to act as virtual standardized patients (VP) for the assessment and training of novice clinicians. We hypothesize that the responses of a VP simulating Post Traumatic Stress Disorder (PTSD) in an adolescent female could elicit a number of diagnostic mental health specific questions (from novice clinicians) that are necessary for differential diagnosis of the condition. Composites were developed to reflect the relation between novice clinician questions and VP responses. The primary goal in this study was evaluative: can a VP generate responses that elicit user questions relevant for PTSD categorization? A secondary goal was to investigate the impact of psychological variables upon the resulting VP Question/Response composites and the overall believability of the system.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Gratch, Jonathan; Wang, Ning; Watt, James H.
Does the Contingency of Agents' Nonverbal Feedback Affect Users' Social Anxiety? Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 120–127, Estoril, Portugal, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kang_does_2008,
title = {Does the Contingency of Agents' Nonverbal Feedback Affect Users' Social Anxiety?},
author = {Sin-Hwa Kang and Jonathan Gratch and Ning Wang and James H. Watt},
url = {http://ict.usc.edu/pubs/Does%20the%20Contingency%20of%20Agents%20Nonverbal%20Feedback%20Affect%20Users%20Social%20Anxiety.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {120–127},
address = {Estoril, Portugal},
abstract = {We explored the association between users' social anxiety and the interactional fidelity of an agent (also referred to as a virtual human), specifically addressing whether the contingency of agents' nonverbal feedback affects the relationship between users' social anxiety and their feelings of rapport, performance, or judgment on interaction partners. This subject was examined across four experimental conditions where participants interacted with three different types of agents and a real human. The three types of agents included the Non-Contingent Agent, the Responsive Agent (opposite to the Non-Contingent Agent), and the Mediated Agent (controlled by a real human). The results indicated that people having greater social anxiety would feel less rapport and show worse performance while feeling more embarrassment if they experience the untimely feedback of the Non-Contingent Agent. The results also showed people having more anxiety would trust real humans less as their interaction partners. We discuss the implication of this relationship between social anxiety in a human subject and the interactional fidelity of an agent on the design of virtual characters for social skills training and therapy.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Novielli, Nicole; Carnevale, Peter; Gratch, Jonathan
Cooperation Attitude in Negotiation Dialogs Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC) Workshop on Corpora for Research on Emotion and Affect, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{novielli_cooperation_2008,
title = {Cooperation Attitude in Negotiation Dialogs},
author = {Nicole Novielli and Peter Carnevale and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Cooperation%20Attitude%20in%20Negotiation%20Dialogs.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC) Workshop on Corpora for Research on Emotion and Affect},
abstract = {We propose an annotation scheme for a corpus of negotiation dialogs that was collected in the scope of a study about the effect of negotiation attitudes and time pressure on dialog patterns.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Kok, Iwan; Gratch, Jonathan
Context-based Recognition during Human Interactions: Automatic Feature Selection and Encoding Dictionary Proceedings Article
In: 10th International Conference on Multimodal Interfaces (ICMI 2008), 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morency_context-based_2008,
title = {Context-based Recognition during Human Interactions: Automatic Feature Selection and Encoding Dictionary},
author = {Louis-Philippe Morency and Iwan Kok and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Context-based%20Recognition%20during%20Human%20Interactions-%20Automatic%20Feature%20Selection%20and%20Encoding%20Dictionary.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {10th International Conference on Multimodal Interfaces (ICMI 2008)},
abstract = {During face-to-face conversation, people use visual feedback such as head nods to communicate relevant information and to synchronize rhythm between participants. In this paper we describe how contextual information from other participants can be used to predict visual feedback and improve recognition of head gestures in human-human interactions. The main challenges addressed in this paper are optimal feature representation using an encoding dictionary and automatic selection of the optimal feature-encoding pairs. We evaluate our approach on a dataset involving 78 human participants. Using a discriminative approach to multi-modal integration, our context-based recognizer significantly improves head gesture recognition performance over a vision-only recognizer.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents Proceedings Article
In: Modeling Communication with Robots and Virtual Humans, pp. 181–197, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_true_2008,
title = {True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/True%20Emotion%20vs%20Social%20Intentions%20in%20Nonverbal%20Communication-%20Towards%20a%20Synthesis%20for%20Embodied%20Conversational%20Agents.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Modeling Communication with Robots and Virtual Humans},
pages = {181–197},
abstract = {Does a facial expression convey privileged information about a person's mental state or is it a communicative act, divorced from "true" beliefs, desires and intentions? This question is often cast as a dichotomy between competing theoretical perspectives. Theorists like Ekman argue for the primacy of emotion as a determinant of nonverbal behavior: emotions "leak" and only indirectly serve social ends. In contrast, theorists such as Fridlund argue for the primacy of social ends in determining nonverbal displays. This dichotomy has worked to divide virtual character research. Whereas there have been advances in modeling emotion, this work is often seen as irrelevant to the generation of communicative behavior. In this chapter, I review current findings on the interpersonal function of emotion. I'll discuss recent developments in Social Appraisal theory as a way to bridge this dichotomy and our attempts to model these functions within the context of embodied conversational agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2007
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Swartout, William; Traum, David; Marsella, Stacy C.; Piepol, Diane
Building Interactive Virtual Humans for Training Environments Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_building_2007,
title = {Building Interactive Virtual Humans for Training Environments},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and William Swartout and David Traum and Stacy C. Marsella and Diane Piepol},
url = {http://ict.usc.edu/pubs/Building%20Interactive%20Virtual%20Humans%20for%20Training%20Environments.pdf},
year = {2007},
date = {2007-11-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {There is a great need in the Joint Forces to have human to human interpersonal training for skills such as negotiation, leadership, interviewing and cultural training. Virtual environments can be incredible training tools if used properly and used for the correct training application. Virtual environments have already been very successful in training Warfighters how to operate vehicles and weapons systems. At the Institute for Creative Technologies (ICT) we have been exploring a new question: can virtual environments be used to train Warfighters in interpersonal skills such as negotiation, tactical questioning and leadership that are so critical for success in the contemporary operating environment? Using embodied conversational agents to create this type of training system has been one of the goals of the Virtual Humans project at the institute. ICT has a great deal of experience building complex, integrated and immersive training systems that address the human factor needs for training experiences. This paper will address the research, technology and value of developing virtual humans for training environments. This research includes speech recognition, natural language understanding & generation, dialogue management, cognitive agents, emotion modeling, question response managers, speech generation and non-verbal behavior. Also addressed will be the diverse set of training environments we have developed for the system, from single computer laptops to multi-computer immersive displays to real and virtual integrated environments. This paper will also discuss the problems, issues and solutions we encountered while building these systems. The paper will recount subject testing we have performed in these environments and results we have obtained from users. Finally the future of this type of Virtual Humans technology and training applications will be discussed.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Roque, Antonio; Leuski, Anton; Georgiou, Panayiotis G.; Gerten, Jillian; Martinovski, Bilyana; Narayanan, Shrikanth; Robinson, Susan; Vaswani, Ashish
Hassan: A Virtual Human for Tactical Questioning Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_hassan_2007,
title = {Hassan: A Virtual Human for Tactical Questioning},
author = {David Traum and Antonio Roque and Anton Leuski and Panayiotis G. Georgiou and Jillian Gerten and Bilyana Martinovski and Shrikanth Narayanan and Susan Robinson and Ashish Vaswani},
url = {http://ict.usc.edu/pubs/Hassan-%20A%20Virtual%20Human%20for%20Tactical%20Questioning%20.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
address = {Antwerp, Belgium},
abstract = {We present Hassan, a virtual human who engages in Tactical Questioning dialogues. We describe the tactical questioning domain, the motivation for this character, the specific architecture and present brief examples and an evaluation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jonsdottir, Gudny Ragna; Gratch, Jonathan; Fast, Edward; Thórisson, Kristinn R.
Fluid Semantic Back-Channel Feedback in Dialogue: Challenges & Progress Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jonsdottir_fluid_2007,
title = {Fluid Semantic Back-Channel Feedback in Dialogue: Challenges & Progress},
author = {Gudny Ragna Jonsdottir and Jonathan Gratch and Edward Fast and Kristinn R. Thórisson},
url = {http://ict.usc.edu/pubs/Fluid%20Semantic%20Back-Channel%20Feedback%20in%20Dialogue-%20Challenges%20&%20Progress.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
address = {Paris, France},
abstract = {Participation in natural, real-time dialogue calls for behaviors supported by perception-action cycles from around 100 msec and up. Generating certain kinds of such behaviors, namely envelope feedback, has been possible since the early 90s. Real-time backchannel feedback related to the content of a dialogue has been more difficult to achieve. In this paper we describe our progress in allowing virtual humans to give rapid within-utterance content-specific feedback in real-time dialogue. We present results from human-subject studies of content feedback, where results show that content feedback to a particular phrase or word in human-human dialogue comes 560-2500 msec from the phrase's onset, 1 second on average. We also describe a system that produces such feedback with an autonomous agent in limited topic domains, present performance data of this agent in human-agent interactions experiments and discuss technical challenges in light of the observed human-subject data.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_model_2007,
title = {A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents},
author = {Antonio Roque and David Traum},
url = {http://ict.usc.edu/pubs/A%20Model%20of%20Compliance%20and%20Emotion%20for%20Potentially%20Adversarial%20Dialogue%20%20Agents.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
address = {Antwerp, Belgium},
abstract = {We present a model of compliance, for domains in which a dialogue agent may become adversarial. This model includes a set of emotions and a set of levels of compliance, and strategies for changing these.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Oh, Sejin; Gratch, Jonathan; Woo, Woontack
Explanatory Style for Socially Interactive Agents Proceedings Article
In: Lecture Notes in Computer Science, Lisbon, Portugal, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{oh_explanatory_2007,
title = {Explanatory Style for Socially Interactive Agents},
author = {Sejin Oh and Jonathan Gratch and Woontack Woo},
url = {http://ict.usc.edu/pubs/Explanatory%20Style%20for%20Socially%20Interactive%20Agents.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Computer Science},
address = {Lisbon, Portugal},
abstract = {Recent years have seen an explosion of interest in computational models of socio-emotional processes, both as a mean to deepen understanding of human behavior and as a mechanism to drive a variety of training and entertainment applications. In contrast with work on emotion, where research groups have developed detailed models of emotional processes, models of personality have emphasized shallow surface behavior. Here, we build on computational appraisal models of emotion to better characterize dispositional differences in how people come to understand social situations. Known as explanatory style, this dispositional factor plays a key role in social interactions and certain socio-emotional disorders, such as depression. Building on appraisal and attribution theories, we model key conceptual variables underlying the explanatory style, and enable agents to exhibit different explanatory tendencies according to their personalities. We describe an interactive virtual environment that uses the model to allow participants to explore individual differences in the explanation of social events, with the goal of encouraging the development of perspective taking and emotion-regulatory skills.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
Creating Spoken Dialogue Characters from Corpora without Annotations Proceedings Article
In: Interspeech 2007, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_creating_2007,
title = {Creating Spoken Dialogue Characters from Corpora without Annotations},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/Creating%20Spoken%20Dialogue%20Characters%20from%20Corpora%20without%20Annotations%20.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Interspeech 2007},
address = {Antwerp, Belgium},
abstract = {Virtual humans are being used in a number of applications, including simulation-based training, multi-player games, and museum kiosks. Natural language dialogue capabilities are an essential part of their human-like persona. These dialogue systems have a goal of being believable and generally have to operate within the bounds of their restricted domains. Most dialogue systems operate on a dialogue-act level and require extensive annotation efforts. Semantic annotation and rule authoring have long been known as bottlenecks for developing dialogue systems for new domains. In this paper, we investigate several dialogue models for virtual humans that are trained on an unannotated human-human corpus. These are inspired by information retrieval and work on the surface text level. We evaluate these in text-based and spoken interactions and also against the upper baseline of human-human dialogues.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ai, Hua; Roque, Antonio; Leuski, Anton; Traum, David
Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System Proceedings Article
In: Proceedings of the 10th Interspeech Conference, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ai_using_2007,
title = {Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System},
author = {Hua Ai and Antonio Roque and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Using%20Information%20State%20to%20Improve%20Dialogue%20Move%20Identification%20in%20a%20Spoken%20Dialogue%20System.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Proceedings of the 10th Interspeech Conference},
address = {Antwerp, Belgium},
abstract = {In this paper we investigate how to improve the performance of a dialogue move and parameter tagger for a task-oriented dialogue system using the information-state approach. We use a corpus of utterances and information states from an implemented system to train and evaluate a tagger, and then evaluate the tagger in an on-line system. Use of information state context is shown to improve performance of the system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Robinson, Susan; Roque, Antonio; Vaswani, Ashish; Traum, David; Hernandez, Charles; Millspaugh, Bill
Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training Proceedings Article
In: 10th International Pragmatics Conference, Göteborg, Sweden, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_evaluation_2007,
title = {Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training},
author = {Susan Robinson and Antonio Roque and Ashish Vaswani and David Traum and Charles Hernandez and Bill Millspaugh},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20a%20Spoken%20Dialogue%20System%20for%20Virtual%20Reality%20Call%20for%20Fire%20Training.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {10th International Pragmatics Conference},
address = {Göteborg, Sweden},
abstract = {We present an evaluation of a spoken dialogue system that engages in dialogues with soldiers training in an immersive Call for Fire (CFF) simulation. We briefly describe aspects of the Joint Fires and Effects Trainer System, and the Radiobot-CFF dialogue system, which can engage in voice communications with a trainee in call for fire dialogues. An experiment is described to judge performance of the Radiobot CFF system compared with human radio operators. Results show that while the current version of the system is not quite at human performance levels, it is already viable for training interaction and as an operator-controller aid.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Traum, David; Marsella, Stacy C.; Swartout, William
The More the Merrier: Multi-Party Negotiation with Virtual Humans Proceedings Article
In: AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence, pp. 1970–1971, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_more_2007,
title = {The More the Merrier: Multi-Party Negotiation with Virtual Humans},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and David Traum and Stacy C. Marsella and William Swartout},
url = {http://ict.usc.edu/pubs/The%20More%20the%20Merrier-%20Multi-Party%20Negotiation%20with%20Virtual%20Humans.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence},
volume = {2},
pages = {1970–1971},
abstract = {The goal of the Virtual Humans Project at the University of Southern California's Institute for Creative Technologies is to enrich virtual training environments with virtual humans – autonomous agents that support face-to-face interaction with trainees in a variety of roles – through bringing together many different areas of research including speech recognition, natural language understanding, dialogue management, cognitive modeling, emotion modeling, non-verbal behavior and speech and knowledge management. The demo at AAAI will focus on our work using virtual humans to train negotiation skills. Conference attendees will negotiate with a virtual human doctor and elder to try to move a clinic out of harm's way in single and multi-party negotiation scenarios using the latest iteration of our Virtual Humans framework. The user will use natural speech to talk to the embodied agents, who will respond in accordance with their internal task model and state. The characters will carry out a multi-party dialogue with verbal and non-verbal behavior. A video of a single-party version of the scenario was shown at AAAI-06. This new interactive demo introduces several new features, including multi-party negotiation, dynamically generated non-verbal behavior and a central ontology.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Rizzo, Albert; Parsons, Thomas D.; Gratch, Jonathan; Swartout, William
A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, pp. 197–210, Washington D.C., 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007-1,
title = {A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills},
author = {Patrick G. Kenny and Albert Rizzo and Thomas D. Parsons and Jonathan Gratch and William Swartout},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Agent%20for%20Training%20Novice%20Therapist%20Clinical%20Interviewing%20Skills.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine},
volume = {4722},
pages = {197–210},
address = {Washington D.C.},
abstract = {Virtual Reality (VR) is rapidly evolving into a pragmatically usable technology for mental health (MH) applications. Over the last five years, the technology for creating virtual humans (VHs) has evolved to the point where they are no longer regarded as simple background characters, but rather can serve a functional interactional role. Our current project involves the construction of a natural language-capable virtual client named “Justin,” which derived from a military negotiation training tool into a virtual therapy patient for training novice clinicians the art of clinical interviewing with a resistant client. Justin portrays a 16-year old male with a conduct disorder who is being forced to participate in therapy by his family. The system uses a sophisticated natural language interface that allows novice clinicians to practice asking interview questions in an effort to create a positive therapeutic alliance with this very challenging virtual client. Herein we proffer a description of our iterative design process and outline our long term vision.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations Proceedings Article
In: ACL 2007 Workshop on Embodied Language Processing, Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dynamic_2007,
title = {Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dynamic%20Movement%20and%20Positioning%20of%20Embodied%20Agents%20in%20Multiparty%20%20Conversations.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {ACL 2007 Workshop on Embodied Language Processing},
address = {Prague, Czech Republic},
abstract = {For embodied agents to engage in realistic multiparty conversation, they must stand in appropriate places with respect to other agents and the environment. When these factors change, for example when an agent joins a conversation, the agents must dynamically move to a new location and/or orientation to accommodate. This paper presents an algorithm for simulating the movement of agents based on observed human behavior using techniques developed for pedestrian movement in crowd simulations. We extend a previous group conversation simulation to include an agent motion algorithm. We examine several test cases and show how the simulation generates results that mirror real-life conversation settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Thagard, Paul; Ditto, Peter; Gratch, Jonathan; Marsella, Stacy C.; Westen, Drew
Emotional Cognition in the Real World Proceedings Article
In: Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society, Nashville, TN, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{thagard_emotional_2007,
title = {Emotional Cognition in the Real World},
author = {Paul Thagard and Peter Ditto and Jonathan Gratch and Stacy C. Marsella and Drew Westen},
url = {http://ict.usc.edu/pubs/Emotional%20Cognition%20in%20the%20Real%20World.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society},
address = {Nashville, TN},
abstract = {There is increasing appreciation in cognitive science of the impact of emotions on many kinds of thinking, from decision making to scientific discovery. This appreciation has developed in all the fields of cognitive science, including, psychology, philosophy, artificial intelligence, and linguistics, and anthropology. The purpose of the proposed symposium is to report and discuss new investigations of the impact of emotion on cognitive processes, in particular ones that are important in real life situations. We will approach the practical importance of emotional cognition from a variety of disciplinary perspectives: social psychology (Ditto), clinical psychology (Westen), computer science (Gratch and Marsella), and philosophy and neuroscience (Thagard). In order to provide integration across these approaches, we will try to address a fundamental set of questions, including: 1. How do emotions interact with basic cognitive processes? 2. What are the positive contributions of emotions to various kinds of thinking in real world situations? 3. How do emotions sometimes bias thinking in real world situations? 4. How can understanding of the psychology and neuroscience of emotional cognition be used to improve the effectiveness of real world thinking?},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
The Architectural Role of Emotion in Cognitive Systems Book Section
In: Integrated Models of Cognitive Systems, Oxford University Press, New York, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_architectural_2007,
title = {The Architectural Role of Emotion in Cognitive Systems},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/The%20Architectural%20Role%20of%20Emotion%20in%20Cognitive%20Systems.pdf},
year = {2007},
date = {2007-03-01},
booktitle = {Integrated Models of Cognitive Systems},
publisher = {Oxford University Press},
address = {New York},
abstract = {In this chapter, we will revive an old argument that theories of human emotion can give insight into the design and control of complex cognitive systems. In particular, we claim that appraisal theories of emotion provide essential insight into the influences of emotion over cognition and can help translate such findings into concrete guidance for the design of cognitive systems. Appraisal theory claims that emotion plays a central and functional role in sensing external events, characterizing them as opportunity or threats and recruiting the cognitive, physical and social resources needed to adaptively respond. Further, because it argues for a close association between emotion and cognition, the theoretical claims of appraisal theory can be recast as a requirement specification for how to build a cognitive system. This specification asserts a set of judgments that must be supported in order to correctly interpret and respond to stimuli and provides a unifying framework for integrating these judgments into a coherent physical or social response. This chapter elaborates argument in some detail based on our joint experience in building complex cognitive systems and computational models of emotion.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Leuski, Anton; Rizzo, Albert
Virtual Patients for Clinical Therapist Skills Training Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 197–210, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007,
title = {Virtual Patients for Clinical Therapist Skills Training},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Anton Leuski and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Patients%20for%20Clinical%20Therapist%20Skills%20Training.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {197–210},
address = {Paris, France},
abstract = {Virtual humans offer an exciting and powerful potential for rich interactive experiences. Fully embodied virtual humans are growing in capability, ease, and utility. As a result, they present an opportunity for expanding research into burgeoning virtual patient medical applications. In this paper we consider the ways in which one may go about building and applying virtual human technology to the virtual patient domain. Specifically we aim to show that virtual human technology may be used to help develop the interviewing and diagnostics skills of developing clinicians. Herein we proffer a description of our iterative design process and preliminary results to show that virtual patients may be a useful adjunct to psychotherapy education.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}