Publications
Search
Swartout, William R.
Virtual Humans as Centaurs: Melding Real and Virtual Book Section
In: Virtual, Augmented and Mixed Reality, vol. 9740, pp. 356–359, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39906-5 978-3-319-39907-2.
@incollection{swartout_virtual_2016,
  title     = {Virtual Humans as Centaurs: Melding Real and Virtual},
  author    = {Swartout, William R.},
  url       = {http://link.springer.com/10.1007/978-3-319-39907-2_34},
  doi       = {10.1007/978-3-319-39907-2_34},
  isbn      = {978-3-319-39906-5, 978-3-319-39907-2},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Virtual, Augmented and Mixed Reality},
  series    = {Lecture Notes in Computer Science},
  volume    = {9740},
  pages     = {356--359},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {Centaurs are man-machine teams that can work together on problems and can out-perform, either people or computers working alone in domains as varied as chess-playing and protein folding. But the centaur of Greek mythology was not a team, but rather a hybrid of man and horse with some of the characteristics of each. In this paper, we outline our efforts to build virtual humans, which might be considered hybrid centaurs, combining features of both people and machines. We discuss experimental evidence that shows that these virtual human hybrids can outperform both people and inanimate processes in some tasks such as medical interviewing.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Artstein, Ron; Gainer, Alesia; Georgila, Kallirroi; Leuski, Anton; Shapiro, Ari; Traum, David
New Dimensions in Testimony Demonstration Proceedings Article
In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pp. 32–36, Association for Computational Linguistics, San Diego, California, 2016.
@inproceedings{artstein_new_2016,
  title     = {New Dimensions in Testimony Demonstration},
  author    = {Artstein, Ron and Gainer, Alesia and Georgila, Kallirroi and Leuski, Anton and Shapiro, Ari and Traum, David},
  url       = {http://www.aclweb.org/anthology/N16-3007},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
  pages     = {32--36},
  publisher = {Association for Computational Linguistics},
  address   = {San Diego, California},
  abstract  = {New Dimensions in Testimony is a prototype dialogue system that allows users to conduct a conversation with a real person who is not available for conversation in real time. Users talk to a persistent representation of Holocaust survivor Pinchas Gutter on a screen, while a dialogue agent selects appropriate responses to user utterances from a set of pre-recorded video statements, simulating a live conversation. The technology is similar to existing conversational agents, but to our knowledge this is the first system to portray a real person. The demonstration will show the system on a range of screens (from mobile phones to large TVs), and allow users to have individual conversations with Mr. Gutter.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mizukami, Masahiro; Traum, David; Yoshino, Koichiro; Neubig, Graham; Nakamura, Satoshi
Word and Dialogue Act Entrainment Analysis based on User Profile Proceedings Article
In: Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence, Kitakyushu, Japan, 2016.
@inproceedings{mizukami_word_2016,
  title     = {Word and Dialogue Act Entrainment Analysis based on User Profile},
  author    = {Mizukami, Masahiro and Traum, David and Yoshino, Koichiro and Neubig, Graham and Nakamura, Satoshi},
  url       = {https://kaigi.org/jsai/webprogram/2016/pdf/356.pdf},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence},
  address   = {Kitakyushu, Japan},
  abstract  = {Patterns of dialogue act and word selection are observable in dialogue. Entrainment is the factor that might account for these patterns. We test the entrainment hypotheses using the switchboard corpus, comparing speech of different speakers from different parts of the dialogue, but also speech of the same speaker at different points. Our findings replicate previous studies that dialogue participants converge toward each other in word choice, but we also investigate novel measures of entrainment of dialogue act selection, and word choice for specific dialogue acts. These studies inform a design for dialogue systems that would show human-like degrees of entrainment.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{gratch_misrepresentation_2016,
  title     = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
  author    = {Gratch, Jonathan and Nazari, Zahra and Johnson, Emmanuel},
  url       = {http://dl.acm.org/citation.cfm?id=2937031},
  isbn      = {978-1-4503-4239-1},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
  pages     = {728--737},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Singapore},
  abstract  = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic – the “fixed-pie lie” – that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
  title     = {Designing a Personal Assistant for Life-Long Learning ({PAL3})},
  author    = {Swartout, William and Nye, Benjamin D. and Hartholt, Arno and Reilly, Adam and Graesser, Arthur C. and VanLehn, Kurt and Wetzel, Jon and Liewer, Matt and Morbini, Fabrizio and Morgan, Brent and Wang, Lijia and Benn, Grace and Rosenberg, Milton},
  url       = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
  isbn      = {978-1-57735-756-8},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of The Twenty-Ninth International {FLAIRS} Conference},
  pages     = {491--496},
  publisher = {AAAI Press},
  address   = {Key Largo, FL},
  abstract  = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan
IAGO: Interactive Arbitration Guide Online Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1510–1512, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{mell_iago_2016,
  title     = {{IAGO}: Interactive Arbitration Guide Online},
  author    = {Mell, Johnathan and Gratch, Jonathan},
  url       = {http://dl.acm.org/citation.cfm?id=2937230},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
  pages     = {1510--1512},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Singapore},
  abstract  = {Automated negotiation between two agents has been the subject of much research focused on optimization and efficiency. However, human-agent negotiation represents a field in which real-world considerations can be more fully explored. Furthermore, teaching negotiation and other interpersonal skills requires long periods of practice with open-ended dialogues and partners. The API presented in this paper represents a novel platform on which to conduct human-agent research and facilitate teaching negotiation tactics in a longitudinal way. We present a prototype demonstration that is real-time, rapidly distributable, and allows more actions than current platforms of negotiation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Rodriguez, Kepa; Poesio, Massimo
ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 2058–2062, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{uryupina_arrau_2016,
  title     = {{ARRAU}: Linguistically-Motivated Annotation of Anaphoric Descriptions},
  author    = {Uryupina, Olga and Artstein, Ron and Bristot, Antonella and Cavicchio, Federica and Rodriguez, Kepa and Poesio, Massimo},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/1121.html},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC} 2016)},
  pages     = {2058--2062},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Portorož, Slovenia},
  abstract  = {This paper presents a second release of the ARRAU dataset: a multi-domain corpus with thorough linguistically motivated annotation of anaphora and related phenomena. Building upon the first release almost a decade ago, a considerable effort had been invested in improving the data both quantitatively and qualitatively. Thus, we have doubled the corpus size, expanded the selection of covered phenomena to include referentiality and genericity and designed and implemented a methodology for enforcing the consistency of the manual annotation. We believe that the new release of ARRAU provides a valuable material for ongoing research in complex cases of coreference as well as for a variety of related tasks. The corpus is publicly available through LDC.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
  title     = {Learning Representations of Affect from Speech},
  author    = {Ghosh, Sayan and Laksana, Eugene and Morency, Louis-Philippe and Scherer, Stefan},
  url       = {http://arxiv.org/pdf/1511.04747.pdf},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {{ICLR} 2016},
  publisher = {ICLR},
  address   = {San Juan, Puerto Rico},
  abstract  = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{collins_towards_2016,
  title     = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
  author    = {Collins, Kathryn J. and Traum, David},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
  isbn      = {978-2-9517408-9-1},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the Language Resources and Evaluation Conference ({LREC})},
  pages     = {118--124},
  publisher = {European Language Resources Association},
  address   = {Portorož, Slovenia},
  abstract  = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions - each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
“Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 949–956, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{de_melo_as_2016,
  title     = {``Do As I Say, Not As I Do'': Challenges in Delegating Decisions to Automated Agents},
  author    = {de Melo, Celso M. and Marsella, Stacy and Gratch, Jonathan},
  url       = {http://dl.acm.org/citation.cfm?id=2937063},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
  pages     = {949--956},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Singapore},
  abstract  = {There has been growing interest, across various domains, in computer agents that can decide on behalf of humans. These agents have the potential to save considerable time and help humans reach better decisions. One implicit assumption, however, is that, as long as the algorithms that simulate decision-making are correct and capture how humans make decisions, humans will treat these agents similarly to other humans. Here we show that interaction with agents that act on our behalf or on behalf of others is richer and more interesting than initially expected. Our results show that, on the one hand, people are more selfish with agents acting on behalf of others, than when interacting directly with others. We propose that agents increase the social distance with others which, subsequently, leads to increased demand. On the other hand, when people task an agent to interact with others, people show more concern for fairness than when interacting directly with others. In this case, higher psychological distance leads people to consider their social image and the long-term consequences of their actions and, thus, behave more fairly. To support these findings, we present an experiment where people engaged in the ultimatum game, either directly or via an agent, with others or agents representing others. We show that these patterns of behavior also occur in a variant of the ultimatum game – the impunity game – where others have minimal power over the final outcome. Finally, we study how social value orientation – i.e., people’s propensity for cooperation – impact these effects. These results have important implications for our understanding of the psychological mechanisms underlying interaction with agents, as well as practical implications for the design of successful agents that act on our behalf or on behalf of others.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
  title     = {Rapid Photorealistic Blendshape Modeling from {RGB-D} Sensors},
  author    = {Casas, Dan and Feng, Andrew and Alexander, Oleg and Fyffe, Graham and Debevec, Paul and Ichikari, Ryosuke and Li, Hao and Olszewski, Kyle and Suma, Evan and Shapiro, Ari},
  url       = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
  doi       = {10.1145/2915926.2915936},
  isbn      = {978-1-4503-4745-7},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
  pages     = {121--129},
  publisher = {ACM Press},
  address   = {Geneva, Switzerland},
  abstract  = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chollet, Mathieu; Wörtwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety Proceedings Article
In: Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation, pp. 488–495, European Language Resources Association, Portoroz, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{chollet_multimodal_2016,
  title     = {A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety},
  author    = {Chollet, Mathieu and Wörtwein, Torsten and Morency, Louis-Philippe and Scherer, Stefan},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/599_Paper.pdf},
  isbn      = {978-2-9517408-9-1},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the {LREC} 2016, Tenth International Conference on Language Resources and Evaluation},
  pages     = {488--495},
  publisher = {European Language Resources Association},
  address   = {Portorož, Slovenia},
  abstract  = {The ability to efficiently speak in public is an essential asset for many professions and is used in everyday life. As such, tools enabling the improvement of public speaking performance and the assessment and mitigation of anxiety related to public speaking would be very useful. Multimodal interaction technologies, such as computer vision and embodied conversational agents, have recently been investigated for the training and assessment of interpersonal skills. Once central requirement for these technologies is multimodal corpora for training machine learning models. This paper addresses the need of these technologies by presenting and sharing a multimodal corpus of public speaking presentations. These presentations were collected in an experimental study investigating the potential of interactive virtual audiences for public speaking training. This corpus includes audio-visual data and automatically extracted features, measures of public speaking anxiety and personality, annotations of participants’ behaviors and expert ratings of behavioral aspects and overall performance of the presenters. We hope this corpus will help other research teams in developing tools for supporting public speaking training.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zarrieß, Sina; Hough, Julian; Kennington, Casey; Manuvinakurike, Ramesh; DeVault, David; Fernández, Raquel; Schlangen, David
PentoRef: A Corpus of Spoken References in Task-oriented Dialogues Proceedings Article
In: 10th edition of the Language Resources and Evaluation Conference, ELRA, Portorož, Slovenia, 2016.
@inproceedings{zarrieb_pentoref_2016,
  title     = {{PentoRef}: A Corpus of Spoken References in Task-oriented Dialogues},
  author    = {Zarrieß, Sina and Hough, Julian and Kennington, Casey and Manuvinakurike, Ramesh and DeVault, David and Fernández, Raquel and Schlangen, David},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/563_Paper.pdf},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {10th edition of the Language Resources and Evaluation Conference},
  publisher = {ELRA},
  address   = {Portorož, Slovenia},
  abstract  = {PentoRef is a corpus of task-oriented dialogues collected in systematically manipulated settings. The corpus is multilingual, with English and German sections, and overall comprises more than 20000 utterances. The dialogues are fully transcribed and annotated with referring expressions mapped to objects in corresponding visual scenes, which makes the corpus a rich resource for research on spoken referring expressions in generation and resolution. The corpus includes several sub-corpora that correspond to different dialogue situations where parameters related to interactivity, visual access, and verbal channel have been manipulated in systematic ways. The corpus thus lends itself to very targeted studies of reference in spontaneous dialogue.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Konovalov, Vasily; Artstein, Ron; Melamud, Oren; Dagan, Ido
The Negochat Corpus of Human-agent Negotiation Dialogues Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 3141–3145, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{konovalov_negochat_2016,
  title     = {The Negochat Corpus of Human-agent Negotiation Dialogues},
  author    = {Konovalov, Vasily and Artstein, Ron and Melamud, Oren and Dagan, Ido},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/240_Paper.pdf},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC} 2016)},
  pages     = {3141--3145},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Portorož, Slovenia},
  abstract  = {Annotated in-domain corpora are crucial to the successful development of dialogue systems of automated agents, and in particular for developing natural language understanding (NLU) components of such systems. Unfortunately, such important resources are scarce. In this work, we introduce an annotated natural language human-agent dialogue corpus in the negotiation domain. The corpus was collected using Amazon Mechanical Turk following the ‘Wizard-Of-Oz’ approach, where a ‘wizard’ human translates the participants’ natural language utterances in real time into a semantic language. Once dialogue collection was completed, utterances were annotated with intent labels by two independent annotators, achieving high inter-annotator agreement. Our initial experiments with an SVM classifier show that automatically inferring such labels from the utterances is far from trivial. We make our corpus publicly available to serve as an aid in the development of dialogue systems for negotiation agents, and suggest that analogous corpora can be created following our methodology and using our available source code. To the best of our knowledge this is the first publicly available negotiation dialogue corpus.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
de Melo, Celso; Marsella, Stacy; Gratch, Jonathan
People Don’t Feel Guilty About Exploiting Machines Journal Article
In: ACM Transactions on Computer-Human Interaction (TOCHI), vol. 23, no. 2, pp. 1–17, 2016, ISSN: 1073-0516.
@article{melo_people_2016,
  title     = {People Don’t Feel Guilty About Exploiting Machines},
  author    = {de Melo, Celso and Marsella, Stacy and Gratch, Jonathan},
  url       = {http://dl.acm.org/citation.cfm?id=2890495},
  doi       = {10.1145/2890495},
  issn      = {1073-0516},
  year      = {2016},
  date      = {2016-05-01},
  journal   = {ACM Transactions on Computer-Human Interaction (TOCHI)},
  volume    = {23},
  number    = {2},
  pages     = {1--17},
  abstract  = {Guilt and envy play an important role in social interaction. Guilt occurs when individuals cause harm to others or break social norms. Envy occurs when individuals compare themselves unfavorably to others and desire to benefit from the others’ advantage. In both cases, these emotions motivate people to act and change the status quo: following guilt, people try to make amends for the perceived transgression and, following envy, people try to harm envied others. In this paper, we present two experiments that study participants' experience of guilt and envy when engaging in social decision making with machines and humans. The results showed that, though experiencing the same level of envy, people felt considerably less guilt with machines than with humans. These effects occurred both with subjective and behavioral measures of guilt and envy, and in three different economic games: public goods, ultimatum, and dictator game. This poses an important challenge for human-computer interaction because, as shown here, it leads people to systematically exploit machines, when compared to humans. We discuss theoretical and practical implications for the design of human-machine interaction systems that hope to achieve the kind of efficiency – cooperation, fairness, reciprocity, etc. – we see in human-human interaction.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Nasihati Gilani, Setareh; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{nasihati_gilani_what_2016,
  title     = {What Kind of Stories Should a Virtual Human Swap?},
  author    = {Nasihati Gilani, Setareh and Sheetz, Kraig and Lucas, Gale and Traum, David},
  url       = {http://dl.acm.org/citation.cfm?id=2937198},
  isbn      = {978-1-4503-4239-1},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
  pages     = {1437--1438},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Singapore},
  abstract  = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped when one conversational participant will reply to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, there are a number of considerations that come up about what kinds of stories should be told, and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of “ice-breaker” questions, that might be used on a first date or similar “get to know you” encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to try to explore the collective impact of above principles on people who interact with the characters. Participants interact with two of the above characters in a “get to know you” scenario. We investigate the degree of reciprocity where people respond to the character with their own stories, and also compare rapport of participants with the characters as well as the impressions of the character's personality.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741--2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
A Semi-automated Evaluation Metric for Dialogue Model Coherence Book Section
In: Situated Dialog in Speech-Based Human-Computer Interaction, pp. 217–225, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-21833-5 978-3-319-21834-2.
@incollection{gandhe_semi-automated_2016,
title = {A Semi-automated Evaluation Metric for Dialogue Model Coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://link.springer.com/10.1007/978-3-319-21834-2_19},
isbn = {978-3-319-21833-5 978-3-319-21834-2},
year = {2016},
date = {2016-04-01},
booktitle = {Situated Dialog in Speech-Based Human-Computer Interaction},
pages = {217--225},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric Weak agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Rizzo, Albert; Lucas, Gale; Gratch, Jonathan; Stratou, Giota; Morency, Louis-Philippe; Chavez, Kenneth; Shilling, Russ; Scherer, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316--322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from a sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and few happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Georgila, Kallirroi; Pynadath, David V.
Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events Proceedings Article
In: Proceedings of The 29th International FLAIRS Conference, pp. 44–49, AAAI Press, Key Largo, FL, 2016.
@inproceedings{georgila_towards_2016,
title = {Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events},
author = {Kallirroi Georgila and David V. Pynadath},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12960/12539},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of The 29th International FLAIRS Conference},
pages = {44--49},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Accurate multiagent social simulation requires a computational model of how people incorporate their observations of real-world events into their beliefs about the state of their world. Current methods for creating such agent-based models typically rely on manual input that can be both burdensome and subjective. In this investigation, we instead pursue automated methods that can translate available data into the desired computational models. For this purpose, we use a corpus of real-world events in combination with longitudinal public opinion polls on a variety of opinion issues. We perform two experiments using automated methods taken from the literature. In our first experiment, we train maximum entropy classifiers to model changes in opinion scores as a function of real-world events. We measure and analyze the accuracy of our learned classifiers by comparing the opinion scores they generate against the opinion scores occurring in a held-out subset of our corpus. In our second experiment, we learn Bayesian networks to capture the same function. We then compare the dependency structures induced by the two methods to identify the event features that have the most significant effect on changes in public opinion.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2007
Lee, Jina; Marsella, Stacy C.; Traum, David; Gratch, Jonathan; Lance, Brent
The Rickel Gaze Model: A Window on the Mind of a Virtual Human Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 296–303, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{lee_rickel_2007,
title = {The Rickel Gaze Model: A Window on the Mind of a Virtual Human},
author = {Jina Lee and Stacy C. Marsella and David Traum and Jonathan Gratch and Brent Lance},
url = {http://ict.usc.edu/pubs/The%20Rickel%20Gaze%20Model-%20A%20Window%20on%20the%20Mind%20of%20a%20Virtual%20Human.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {296--303},
address = {Paris, France},
abstract = {Gaze plays a large number of cognitive, communicative and affective roles in face-to-face human interaction. To build a believable virtual human, it is imperative to construct a gaze model that generates realistic gaze behaviors. However, it is not enough to merely imitate a person's eye movements. The gaze behaviors should reflect the internal states of the virtual human and users should be able to derive them by observing the behaviors. In this paper, we present a gaze model driven by the cognitive operations; the model processes the virtual human's reasoning, dialog management, and goals to generate behaviors that reflect the agent's inner thoughts. It has been implemented in our virtual human system and operates in real-time. The gaze model introduced in this paper was originally designed and developed by Jeff Rickel but has since been extended by the authors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy in negotiation Journal Article
In: Group Decision and Negotiation, vol. 16, pp. 61–76, 2007, ISSN: 0926-2644.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{martinovski_rejection_2007,
title = {Rejection of empathy in negotiation},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20in%20negotiation.pdf},
issn = {0926-2644},
year = {2007},
date = {2007-01-01},
journal = {Group Decision and Negotiation},
volume = {16},
pages = {61--76},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Jan, Dusan; Herrera, David; Martinovski, Bilyana; Novick, David; Traum, David
A Computational Model of Culture-Specific Conversational Behavior Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_computational_2007,
title = {A Computational Model of Culture-Specific Conversational Behavior},
author = {Dusan Jan and David Herrera and Bilyana Martinovski and David Novick and David Traum},
url = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Culture-Specific%20Conversational%20Behavior.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
address = {Paris, France},
abstract = {This paper presents a model for simulating cultural differences in the conversational behavior of virtual agents. The model provides parameters for differences in proxemics, gaze and overlap in turn taking. We present a review of literature on these factors and show results of a study where native speakers of North American English, Mexican Spanish and Arabic were asked to rate the realism of the simulations generated based on different cultural parameters with respect to their culture.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms Proceedings Article
In: Proceedings of the 1st International Conference on Computational Creativity (ICCC-X), pp. 248–257, 2007, ISBN: 978-989-96001-2-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{de_melo_evolving_2007,
title = {Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms},
author = {Celso M. de Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20through%20Color%20in%20Virtual%20Humans%20using%20Genetic%20Algorithms.pdf},
isbn = {978-989-96001-2-6},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the 1st International Conference on Computational Creativity (ICCC-X)},
pages = {248--257},
abstract = {For centuries artists have been exploring the formal elements of art (lines, space, mass, light, color, sound, etc.) to express emotions. This paper takes this insight to explore new forms of expression for virtual humans which go beyond the usual bodily, facial and vocal expression channels. In particular, the paper focuses on how to use color to influence the perception of emotions in virtual humans. First, a lighting model and filters are used to manipulate color. Next, an evolutionary model, based on genetic algorithms, is developed to learn novel associations between emotions and color. An experiment is then conducted where non-experts evolve mappings for joy and sadness, without being aware that genetic algorithms are used. In a second experiment, the mappings are analyzed with respect to its features and how general they are. Results indicate that the average fitness increases with each new generation, thus suggesting that people are succeeding in creating novel and useful mappings for the emotions. Moreover, the results show consistent differences between the evolved images of joy and the evolved images of sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Wang, Ning; Okhmatovskaia, Anna; Lamothe, Francois; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Can virtual humans be more engaging than real ones? Proceedings Article
In: Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments, pp. 286–297, Beijing, China, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_can_2007,
title = {Can virtual humans be more engaging than real ones?},
author = {Jonathan Gratch and Ning Wang and Anna Okhmatovskaia and Francois Lamothe and Mathieu Morales and R. J. van der Werf and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Can%20virtual%20humans%20be%20more%20engaging%20than%20real%20ones.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments},
pages = {286--297},
address = {Beijing, China},
abstract = {Emotional bonds don't arise from a simple exchange of facial displays, but often emerge through the dynamic give and take of face-to-face interactions. This article explores the phenomenon of rapport, a feeling of connectedness that seems to arise from rapid and contingent positive feedback between partners and is often associated with socio-emotional processes. Rapport has been argued to lead to communicative efficiency, better learning outcomes, improved acceptance of medical advice and successful negotiations. We provide experimental evidence that a simple virtual character that provides positive listening feedback can induce stronger rapport-like effects than face-to-face communication between human partners. Specifically, this interaction can be more engaging to storytellers than speaking to a human audience, as measured by the length and content of their stories.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Wang, Ning; Gerten, Jillian; Fast, Edward; Duffy, Robin
Creating Rapport with Virtual Agents Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 125–128, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_creating_2007,
title = {Creating Rapport with Virtual Agents},
author = {Jonathan Gratch and Ning Wang and Jillian Gerten and Edward Fast and Robin Duffy},
url = {http://ict.usc.edu/pubs/Creating%20Rapport%20with%20Virtual%20Agents.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {125--128},
address = {Paris, France},
abstract = {Recent research has established the potential for virtual characters to establish rapport with humans through simple contingent nonverbal behaviors. We hypothesized that the contingency, not just the frequency of positive feedback is crucial when it comes to creating rapport. The primary goal in this study was evaluative: can an agent generate behavior that engenders feelings of rapport in human speakers and how does this compare to human generated feedback? A secondary goal was to answer the question: Is contingency (as opposed to frequency) of agent feedback crucial when it comes to creating feelings of rapport? Results suggest that contingency matters when it comes to creating rapport and that agent generated behavior was as good as human listeners in creating rapport. A "virtual human listener" condition performed worse than other conditions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; McLay, Robert N.; Perlman, Karen; Rothbaum, Barbara O.; Reger, Greg; Parsons, Thomas D.; Difede, JoAnn; Pair, Jarrell
Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder Journal Article
In: Virtual Rehabilitation, vol. 27, pp. 124–130, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_virtual_2007,
title = {Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder},
author = {Albert Rizzo and Ken Graap and Robert N. McLay and Karen Perlman and Barbara O. Rothbaum and Greg Reger and Thomas D. Parsons and JoAnn Difede and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Virtual%20Iraq-%20Initial%20Case%20Reports%20from%20a%20VR%20Exposure%20Therapy%20Application%20for%20Combat-Related%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2007},
date = {2007-01-01},
journal = {Virtual Rehabilitation},
volume = {27},
pages = {124--130},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale and brief description of a Virtual Iraq PTSD VR therapy application and present initial findings from two successfully treated patients. The VR treatment environment was created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far, Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center and the results from two successfully treated patients are presented along with a delineation of our future plans for research and clinical care using this application.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gandhe, Sudeep; Traum, David
First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus Proceedings Article
In: 5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems, Hyderabad, India, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_first_2007,
title = {First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20towards%20Dialogue%20Modelling%20from%20an%20Un-annotated%20Human-Human%20Corpus.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems},
address = {Hyderabad, India},
abstract = {Virtual human characters equipped with natural language dialogue capability have proved useful in many fields like simulation training and interactive games. Generally behind such dialogue managers lies a complex knowledge-rich rule-based system. Building such system involves meticulous annotation of data and hand authoring of rules. In this paper we build a statistical dialogue model from roleplay and wizard of oz dialog corpus with virtually no annotation. We compare these methods with the traditional approaches. We have evaluated these systems for perceived appropriateness of response and the results are presented here.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2006
Gratch, Jonathan; Okhmatovskaia, Anna; Duncan, Susan
Virtual Humans for the Study of Rapport in Cross Cultural Settings Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_virtual_2006,
title = {Virtual Humans for the Study of Rapport in Cross Cultural Settings},
author = {Jonathan Gratch and Anna Okhmatovskaia and Susan Duncan},
url = {http://ict.usc.edu/pubs/VIRTUAL%20HUMANS%20FOR%20THE%20STUDY%20OF%20RAPPORT%20IN%20CROSS%20CULTURAL%20SETTINGS.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {As an increasing part of the Army's mission involves establishing rapport with diverse populations, training interpersonal skills becomes critically important. Here we describe a "Rapport Agent" that senses and responds to a speaker's nonverbal behavior and provide empirical evidence that it increases speaker fluency and engagement. We argue such agent technology has potential, both as a training system to enhance communication skills, and to assess the key factors that influence rapport in face-to-face interactions. We conclude by discussing ways the nonverbal correlates of rapport vary between Arabic and English speakers and discuss the potential of such technology to advance research and training into rapport in cross-cultural settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark; Traum, David; Lane, H. Chad; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan; Lent, Michael
Teaching Negotiation Skills through Practice and Reflection with Virtual Humans Journal Article
In: Simulation: Transactions of the Society for Modeling and Simulation, vol. 82, no. 11, pp. 685–701, 2006.
Abstract | Links | BibTeX | Tags: Learning Sciences, Social Simulation, Virtual Humans
@article{core_teaching_2006,
title = {Teaching Negotiation Skills through Practice and Reflection with Virtual Humans},
author = {Mark Core and David Traum and H. Chad Lane and William Swartout and Stacy C. Marsella and Jonathan Gratch and Michael van Lent},
url = {http://ict.usc.edu/pubs/Teaching%20Negotiation%20Skills.pdf},
year = {2006},
date = {2006-11-01},
journal = {Simulation: Transactions of the Society for Modeling and Simulation},
volume = {82},
number = {11},
pages = {685--701},
abstract = {Although the representation of physical environments and behaviors will continue to play an important role in simulation-based training, an emerging challenge is the representation of virtual humans with rich mental models (e.g., including emotions, trust) that interact through conversational as well as physical behaviors. The motivation for such simulations is training soft skills such as leadership, cultural awareness, and negotiation, where the majority of actions are conversational, and the problem solving involves consideration of the emotions, attitudes, and desires of others. The educational power of such simulations can be enhanced by the integration of an intelligent tutoring system to support learners' understanding of the effect of their actions on virtual humans and how they might improve their performance. In this paper, we discuss our efforts to build such virtual humans, along with an accompanying intelligent tutor, for the domain of negotiation and cultural awareness.},
keywords = {Learning Sciences, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Leuski, Anton; Kennedy, Brandon; Patel, Ronakkumar; Traum, David
Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be? Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_asking_2006,
title = {Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be?},
author = {Anton Leuski and Brandon Kennedy and Ronakkumar Patel and David Traum},
url = {http://ict.usc.edu/pubs/Asking%20Questions%20to%20Limited%20Domain%20Virtual%20Characters.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper, we describe the evaluation of a limited domain question-answering character, particularly as to the effect of non-optimal speech recognition, and the ability to appropriately answer novel questions. Results show that answering ability is robust until speech recognition reaches over 60% word error rate.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Parsons, Thomas D.; Liewer, Matt; Graap, Ken; Difede, JoAnn; Rothbaum, Barbara O.; Reger, Greg; Roy, Michael
A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006-1,
title = {A Virtual Reality Therapy Application for {OEF/OIF} Combat-related Post Traumatic Stress Disorder},
author = {Albert Rizzo and Jarrell Pair and Thomas D. Parsons and Matt Liewer and Ken Graap and JoAnn Difede and Barbara O. Rothbaum and Greg Reger and Michael Roy},
url = {http://ict.usc.edu/pubs/A%20VIRTUAL%20REALITY%20THERAPY%20APPLICATION%20FOR%20OEF%20OIF%20COMBAT-RELATED%20POST%20TRAUMATIC%20STRESS%20DISORDER.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Julia; Zbylut, Michelle L.; Gordon, Andrew S.; Traum, David; Gandhe, Sudeep; King, Stewart; Lavis, Salvo; Rocher, Scott
AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{hill_axlnet_2006,
title = {AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders},
author = {Randall W. Hill and Julia Kim and Michelle L. Zbylut and Andrew S. Gordon and David Traum and Sudeep Gandhe and Stewart King and Salvo Lavis and Scott Rocher},
url = {http://ict.usc.edu/pubs/AXLNet-%20Web-enabled%20Case%20Method%20Instruction%20for%20Accelerating%20Tacit%20Knowledge%20Acquisition%20in%20Leaders.PDF},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {AXL.Net is a prototype web-based immersive technology solution that supports case method teaching for U.S. Army leader development. The AXL.Net system addresses three challenges: (1) designing a pedagogically sound research prototype for leader development, (2) integrating research technologies with the best of Web 2.0 innovations to enhance case method teaching, and (3) providing an easy to use system. Initial evaluations show that the prototype application and framework is effective for leader development.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Leuski, Anton; Rangarajan, Vivek; Robinson, Susan; Vaswani, Ashish; Narayanan, Shrikanth; Traum, David
Radiobot-CFF: A Spoken Dialogue System for Military Training Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_radiobot-cff_2006,
title = {{Radiobot-CFF}: A Spoken Dialogue System for Military Training},
author = {Antonio Roque and Anton Leuski and Vivek Rangarajan and Susan Robinson and Ashish Vaswani and Shrikanth Narayanan and David Traum},
url = {http://ict.usc.edu/pubs/Radiobot-CFF-%20A%20Spoken%20Dialogue%20System%20for%20Military%20Training.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {Interspeech 2006},
address = {Pittsburgh, PA},
abstract = {We describe a spoken dialogue system which can engage in Call For Fire (CFF) radio dialogues to help train soldiers in proper procedures for requesting artillery fire missions. We describe the domain, an information-state dialogue manager with a novel system of interactive information components, and provide evaluation results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tepperman, Joseph; Traum, David; Narayanan, Shrikanth
"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{tepperman_yeah_2006,
title = {{``Yeah Right''}: Sarcasm Recognition for Spoken Dialogue Systems},
author = {Joseph Tepperman and David Traum and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Yeah%20Right-%20Sarcasm%20Recognition%20for%20Spoken%20Dialogue%20Systems.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {Interspeech 2006},
address = {Pittsburgh, PA},
abstract = {The robust understanding of sarcasm in a spoken dialogue system requires a reformulation of the dialogue manager's basic assumptions behind, for example, user behavior and grounding strategies. But automatically detecting a sarcastic tone of voice is not a simple matter. This paper presents some experiments toward sarcasm recognition using prosodic, spectral, and contextual cues. Our results demonstrate that spectral and contextual features can be used to detect sarcasm as well as a human annotator would, and confirm a long-held claim in the field of psychology — that prosody alone is not sufficient to discern whether a speaker is being sarcastic.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gluck, Kevin A.; Gunzelmann, Glenn; Gratch, Jonathan; Hudlicka, Eva; Ritter, Frank E.
Modeling the Impact of Cognitive Moderators on Human Cognition and Performance Proceedings Article
In: Proceedings of the 2006 Conference of the Cognitive Society, pp. 2658, Vancouver, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gluck_modeling_2006,
  author    = {Kevin A. Gluck and Glenn Gunzelmann and Jonathan Gratch and Eva Hudlicka and Frank E. Ritter},
  title     = {Modeling the Impact of Cognitive Moderators on Human Cognition and Performance},
  booktitle = {Proceedings of the 2006 Conference of the Cognitive Society},
  pages     = {2658},
  address   = {Vancouver, CA},
  year      = {2006},
  date      = {2006-08-01},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Impact%20of%20Cognitive%20Moderators%20on%20Human%20Cognition%20and%20Performance.pdf},
  abstract  = {Cognitive moderators, such as emotions, personality, stress, and fatigue, represent an emerging area of research within the cognitive science community and are increasingly acknowledged as important and ubiquitous influences on cognitive processes. This symposium brings together scientists engaged in research to develop models that help us better understand the mechanisms through which these factors impact human cognition and performance. There are two unifying themes across the presentations. One theme is a commitment to developing computational models useful for simulating the processes that produce the effects and phenomena of interest. The second theme is a commitment to assessing the validity of the models by comparing their performance against empirical human data.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Ronakkumar; Leuski, Anton; Traum, David
Dealing with Out of Domain Questions in Virtual Characters Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_dealing_2006,
title = {Dealing with Out of Domain Questions in Virtual Characters},
author = {Ronakkumar Patel and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Dealing%20with%20Out%20of%20Domain%20Questions%20in%20Virtual%20Characters.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents},
address = {Marina del Rey, CA},
abstract = {We consider the problem of designing virtual characters that support speech-based interactions in a limited domain. Previously we have shown that classification can be an effective and robust tool for selecting appropriate in-domain responses. In this paper, we consider the problem of dealing with out-of-domain user questions. We introduce a taxonomy of out-of-domain response types. We consider three classification architectures for selecting the most appropriate out-of-domain responses. We evaluate these architectures and show that they significantly improve the quality of the response selection making the user's interaction with the virtual character more natural and engaging.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Carnicke, Sharon Marie; Gratch, Jonathan; Okhmatovskaia, Anna; Rizzo, Albert
An Exploration of Delsarte's Structural Acting System Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA), pp. 80–92, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{marsella_exploration_2006,
title = {An Exploration of {Delsarte's} Structural Acting System},
author = {Stacy C. Marsella and Sharon Marie Carnicke and Jonathan Gratch and Anna Okhmatovskaia and Albert Rizzo},
url = {http://ict.usc.edu/pubs/An%20Exploration%20of%20Delsarte%E2%80%99s%20Structural%20Acting%20System.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA)},
pages = {80–92},
address = {Marina del Rey, CA},
abstract = {The designers of virtual agents often draw on a large research literature in psychology, linguistics and human ethology to design embodied agents that can interact with people. In this paper, we consider a structural acting system developed by Francois Delsarte as a possible resource in designing the nonverbal behavior of embodied agents. Using human subjects,we evaluate one component of the system, Delsarte's Cube, that addresses the meaning of differing attitudes of the hand in gestures.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Lamothe, Francois; Marsella, Stacy C.; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Virtual Rapport Proceedings Article
In: Lecture Notes in Computer Science, pp. 14–27, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_virtual_2006-1,
  author    = {Jonathan Gratch and Anna Okhmatovskaia and Francois Lamothe and Stacy C. Marsella and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
  title     = {Virtual Rapport},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {4311},
  pages     = {14–27},
  address   = {Marina del Rey, CA},
  year      = {2006},
  date      = {2006-08-01},
  url       = {http://ict.usc.edu/pubs/Virtual%20Rapport.pdf},
  abstract  = {Effective face-to-face conversations are highly interactive. Participants respond to each other, engaging in nonconscious behavioral mimicry and backchanneling feedback. Such behaviors produce a subjective sense of rapport and are correlated with effective communication, greater liking and trust, and greater influence between participants. Creating rapport requires a tight sense-act loop that has been traditionally lacking in embodied conversational agents. Here we describe a system, based on psycholinguistic theory, designed to create a sense of rapport between a human speaker and virtual human listener. We provide empirical evidence that it increases speaker fluency and engagement.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
An Information State-Based Dialogue Manager for Call for Fire Dialogues Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_information_2006,
  author    = {Antonio Roque and David Traum},
  title     = {An Information State-Based Dialogue Manager for Call for Fire Dialogues},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-07-01},
  url       = {http://ict.usc.edu/pubs/An%20Information%20State-Based%20Dialogue%20Manager%20for%20Call%20for%20Fire%20Dialogues.pdf},
  abstract  = {We present a dialogue manager for "Call for Fire" training dialogues. We describe the training environment, the domain, the features of its novel information state-based dialogue manager, the system it is a part of, and preliminary evaluation results.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Patel, Ronakkumar; Traum, David; Kennedy, Brandon
Building Effective Question Answering Characters Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_building_2006,
  author    = {Anton Leuski and Ronakkumar Patel and David Traum and Brandon Kennedy},
  title     = {Building Effective Question Answering Characters},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-07-01},
  url       = {http://ict.usc.edu/pubs/Building%20Effective%20Question%20Answering%20Characters.pdf},
  abstract  = {In this paper, we describe methods for building and evaluation of limited domain question-answering characters. Several classification techniques are tested, including text classification using support vector machines, language-model based retrieval, and cross-language information retrieval techniques, with the latter having the highest success rate. We also evaluated the effect of speech recognition errors on performance with users, finding that retrieval is robust until recognition reaches over 50% WER.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Egges, Arjan; Eliëns, Anton; Isbister, Katherine; Paiva, Ana; Rist, Thomas; Hagen, Paul
Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans Proceedings Article
In: Dagstuhl Seminar Proceedings, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_design_2006,
  author    = {Jonathan Gratch and Stacy C. Marsella and Arjan Egges and Anton Eliëns and Katherine Isbister and Ana Paiva and Thomas Rist and Paul Hagen},
  title     = {Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans},
  booktitle = {Dagstuhl Seminar Proceedings},
  year      = {2006},
  date      = {2006-06-01},
  url       = {http://ict.usc.edu/pubs/Design%20criteria%20techniques%20and%20case%20studies%20for%20creating%20and%20evaluating%20interactive%20experiences%20for%20virtual%20humans.pdf},
  abstract  = {How does one go about designing a human? With the rise in recent years of virtual humans this is no longer purely a philosophical question. Virtual humans are intelligent agents with a body, often a human-like graphical body, that interact verbally and non-verbally with human users on a variety of tasks and applications. At a recent meeting on this subject, the above authors participated in a several day discussion on the question of virtual human design. Our working group approached this question from the perspective of interactivity. Specifically, how can one design effective interactive experiences involving a virtual human, and what constraints does this goal place on the form and function of an embodied conversational agent. Our group grappled with several related questions: What ideals should designers aspire to, what sources of theory and data will best lead to this goal and what methodologies can inform and validate the design process? This article summarizes our output and suggests a specific framework, borrowed from interactive media design, as a vehicle for advancing the state of interactive experiences with virtual humans.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents Book Section
In: Modeling Communication with Robots and Virtual Humans, pp. 296–309, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{traum_talking_2006,
  author    = {David Traum},
  title     = {Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents},
  booktitle = {Modeling Communication with Robots and Virtual Humans},
  pages     = {296–309},
  year      = {2006},
  date      = {2006-04-01},
  url       = {http://ict.usc.edu/pubs/Talking%20to%20Virtual%20Humans.pdf},
  abstract  = {Virtual Humans are artificial characters who look and act like humans, but inhabit a simulated environment. One important aspect of many virtual humans is their communicative dialogue ability. In this paper we outline a methodology for study of dialogue behavior and construction of virtual humans. We also consider three architectures for different types of virtual humans that have been built at the Institute for Creative Technologies.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Marsella, Stacy C.; Gratch, Jonathan
EMA: A computational model of appraisal dynamics Proceedings Article
In: Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion, Vienna, Austria, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{marsella_ema_2006,
title = {{EMA}: A computational model of appraisal dynamics},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/EMA-%20A%20computational%20model%20of%20appraisal%20dynamics.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion},
address = {Vienna, Austria},
abstract = {A computational model of emotion must explain both the rapid dynamics of some emotional reactions as well as the slower responses that follow deliberation. This is often addressed by positing multiple appraisal processes such as fast pattern directed vs. slower deliberative appraisals. In our view, this confuses appraisal with inference. Rather, we argue for a single and automatic appraisal process that operates over a person’s interpretation of their relationship to the environment. Dynamics arise from perceptual and inferential processes operating on this interpretation (including deliberative and reactive processes). We illustrate this perspective through the computational modeling of a naturalistic emotional situation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Graap, Ken; Manson, Brian; McNerney, Peter J.; Wiederhold, Brenda K.; Wiederhold, Mark; Spira, James
A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment Proceedings Article
In: NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for {Iraq} War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment},
author = {Albert Rizzo and Jarrell Pair and Ken Graap and Brian Manson and Peter J. McNerney and Brenda K. Wiederhold and Mark Wiederhold and James Spira},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Military%20Personnel%20with%20Post%20Traumatic%20Stress%20Disorder-%20From%20Training%20to%20Toy%20to%20Treatment.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder},
abstract = {Post Traumatic Stress Disorder is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of a Virtual Iraq PTSD VR application that has been created from the virtual assets that were initially developed for a combat tactical training simulation, which then served as the inspiration for the X-Box game entitled Full Spectrum Warrior.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Allen, Brian; Dautricourt, Matthieu; Treskunov, Anton; Liewer, Matt; Graap, Ken; Reger, Greg; Rizzo, Albert
A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the IEEE VR 2006 Conference, pp. 64–71, Alexandria, VA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{pair_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for {Iraq} War Post Traumatic Stress Disorder},
author = {Jarrell Pair and Brian Allen and Matthieu Dautricourt and Anton Treskunov and Matt Liewer and Ken Graap and Greg Reger and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {Proceedings of the IEEE VR 2006 Conference},
pages = {64–71},
address = {Alexandria, VA},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-Centered tests with the application are currently underway at the Naval Medical Center–San Diego and within an Army Combat Stress Control Team in Iraq with clinical trials scheduled to commence in February 2006.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul
A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond Journal Article
In: Tutorials in Quantitative Methods for Psychology, vol. 2, no. 2, pp. 43–51, 2006.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Virtual Humans
@article{rosenbloom_cognitive_2006,
title = {A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/A%20Cognitive%20Odyssey-%20From%20the%20Power%20Law%20of%20Practice%20to%20a%20General%20Learning%20Mechanism%20and%20Beyond.pdf},
year = {2006},
date = {2006-01-01},
journal = {Tutorials in Quantitative Methods for Psychology},
volume = {2},
number = {2},
pages = {43–51},
abstract = {This article traces a line of research that began with the establishment of a pervasive regularity in human performance – the Power Law of Practice – and proceeded through several decades' worth of investigations that this opened up into learning and cognitive architecture. The results touch on both cognitive psychology and artificial intelligence, and more specifically on the possibility of building general learning mechanisms/systems. It is a story whose final chapter is still to be written.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Dillenbourg, Pierre; Traum, David
Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving Journal Article
In: The Journal of the Learning Sciences, vol. 15, no. 1, pp. 121–151, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{dillenbourg_sharing_2006,
  author    = {Pierre Dillenbourg and David Traum},
  title     = {Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving},
  journal   = {The Journal of the Learning Sciences},
  volume    = {15},
  number    = {1},
  pages     = {121–151},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Sharing%20Solutions-%20Persistence%20and%20Grounding%20in%20Multimodal%20Collaborative%20Problem%20Solving.pdf},
  abstract  = {This article reports on an exploratory study of the relationship between grounding and problem solving in multimodal computer-mediated collaboration. This article examines two different media, a shared whiteboard and a MOO environment that includes a text chat facility. A study was done on how the acknowledgment rate (how often partners give feedback of having perceived, understood, and accepted partner's contributions) varies according to the media and the content of interactions. It was expected that the whiteboard would serve to draw schemata that disambiguate chat utterances. Instead, results show that the whiteboard is primarily used to represent the state of problem solving and the chat is used for grounding information created on the whiteboard. These results are interpreted in terms of persistence: More persistent information is exchanged through the more persistent medium. The whiteboard was used as a shared memory rather than a grounding tool.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.; Mao, Wenji
Towards a Validated Model of "Emotional Intelligence" Proceedings Article
In: Proceedings of the 21st National Conference on Artificial Intelligence, pp. 1613–1616, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2006,
title = {Towards a Validated Model of {``Emotional Intelligence''}},
author = {Gratch, Jonathan and Marsella, Stacy C. and Mao, Wenji},
url = {http://ict.usc.edu/pubs/Towards%20a%20Validated%20Model%20of%20Emotional%20Intelligence.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the 21st National Conference on Artificial Intelligence},
volume = {2},
pages = {1613–1616},
address = {Boston, MA},
abstract = {This article summarizes recent progress in developing a validated computational account of the cognitive antecedents and consequences of emotion. We describe the potential of this work to impact a variety of AI problem domains.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Traum, David
Improving Question-Answering With Linking Dialogues Proceedings Article
In: International Conference on Intelligent User Interfaces (IUI-2006), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_improving_2006,
  author    = {Sudeep Gandhe and Andrew S. Gordon and David Traum},
  title     = {Improving Question-Answering With Linking Dialogues},
  booktitle = {International Conference on Intelligent User Interfaces (IUI-2006)},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Improving%20Question-Answering%20With%20Linking%20Dialogues%20.pdf},
  abstract  = {Question-answering dialogue systems have found many applications in interactive learning environments. This paper is concerned with one such application for Army leadership training, where trainees input free-text questions that elicit pre-recorded video responses. Since these responses are already crafted before the question is asked, a certain degree of incoherence exists between the question that is asked and the answer that is given. This paper explores the use of short linking dialogues that stand in between the question and its video response to alleviate the problem of incoherence. We describe a set of experiments with human generated linking dialogues that demonstrate their added value. We then describe our implementation of an automated method for utilizing linking dialogues and show that these have better coherence properties than the original system without linking dialogues.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating a Computational Model of Social Causality and Responsibility Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Hakodate, Japan, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_evaluating_2006,
  author    = {Wenji Mao and Jonathan Gratch},
  title     = {Evaluating a Computational Model of Social Causality and Responsibility},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Hakodate, Japan},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20Computational%20Model%20of%20Social%20Causality%20and%20Responsibility.pdf},
  abstract  = {Intelligent agents are typically situated in a social environment and must reason about social cause and effect. Such reasoning is qualitatively different from physical causal reasoning that underlies most intelligent systems. Modeling social causal reasoning can enrich the capabilities of multi-agent systems and intelligent user interfaces. In this paper, we empirically evaluate a computational model of social causality and responsibility against human social judgments. Results from our experimental studies show that in general, the model's predictions of internal variables and inference process are consistent with human responses, though they also suggest some possible refinement to the computational model.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Lindheim, Richard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation Book Section
In: Multimodal Intelligent Information Presentation, vol. 27, pp. 305–321, Springer, Netherlands, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{swartout_simulation_2006,
title = {Simulation Meets {Hollywood}: Integrating Graphics, Sound, Story and Character for Immersive Simulation},
author = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Richard Lindheim and Stacy C. Marsella and Jeff Rickel and David Traum},
url = {http://ict.usc.edu/pubs/SIMULATION%20MEETS%20HOLLYWOOD-%20Integrating%20Graphics,%20Sound,%20Story%20and%20Character%20for%20Immersive%20Simulation.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Multimodal Intelligent Information Presentation},
volume = {27},
pages = {305–321},
publisher = {Springer},
address = {Netherlands},
abstract = {The Institute for Creative Technologies was created at the University of Southern California with the goal of bringing together researchers in simulation technology to collaborate with people from the entertainment industry. The idea was that much more compelling simulations could be developed if researchers who understood state-of-the-art simulation technology worked together with writers and directors who knew how to create compelling stories and characters. This paper presents our first major effort to realize that vision, the Mission Rehearsal Exercise Project, which confronts a soldier trainee with the kinds of dilemmas he might reasonably encounter in a peacekeeping operation. The trainee is immersed in a synthetic world and interacts with virtual humans: artificially intelligent and graphically embodied conversational agents that understand and generate natural language, reason about world events and respond appropriately to the trainee's actions or commands. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have also joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. In this paper we describe the Mission Rehearsal Exercise system and the insights gained through this large-scale integration.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Roque, Antonio; Ai, Hua; Traum, David
Evaluation of an Information State-Based Dialogue Manager Proceedings Article
In: Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue, Potsdam, Germany, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_evaluation_2006,
  author    = {Antonio Roque and Hua Ai and David Traum},
  title     = {Evaluation of an Information State-Based Dialogue Manager},
  booktitle = {Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue},
  address   = {Potsdam, Germany},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Information%20State-Based%20Dialogue%20Manager.pdf},
  abstract  = {We describe an evaluation of an information state-based dialogue manager by measuring its accuracy in information state component updating.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; Pair, Jarrell; Reger, Greg; Treskunov, Anton; Parsons, Thomas D.
User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder Proceedings Article
In: Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology, Esbjerg, Denmark, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_user-centered_2006,
  title     = {User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder},
  author    = {Albert Rizzo and Ken Graap and Jarrell Pair and {Reger} and Anton Treskunov and Thomas D. Parsons},
  url       = {http://ict.usc.edu/pubs/User-centered%20design%20driven%20development%20of%20a%20virtual%20reality%20therapy%20application%20for%20Iraq%20war%20combat-related%20post%20traumatic%20stress%20disorder.pdf},
  booktitle = {Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology},
  address   = {Esbjerg, Denmark},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  internal-note = {NOTE(review): fourth author appears only as the bare surname ``Reger'' — the given name was lost in the source record (presumably Greg M. Reger; verify against the published paper). Braced so BibTeX treats it as a surname-only name rather than a first name.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Toward Virtual Humans Journal Article
In: AI Magazine, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{swartout_toward_2006,
  title     = {Toward Virtual Humans},
  author    = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Stacy C. Marsella and Jeff Rickel and David Traum},
  url       = {http://ict.usc.edu/pubs/Toward%20Virtual%20Humans.pdf},
  journal   = {AI Magazine},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {This paper describes the virtual humans developed as part of the Mission Rehearsal Exercise project, a virtual reality-based training system. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. We describe the key capabilities of the virtual humans, including task representation and reasoning, natural language dialogue, and emotion reasoning, and show how these capabilities are integrated to provide more human-level intelligence than would otherwise be possible.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Leuski, Anton; Pair, Jarrell; Traum, David; McNerney, Peter J.; Georgiou, Panayiotis G.; Patel, Ronakkumar
How to Talk to a Hologram Proceedings Article
In: Proceedings of the 11th International Conference on Intelligent User Interfaces, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_how_2006,
  title     = {How to Talk to a Hologram},
  author    = {Anton Leuski and Jarrell Pair and David Traum and Peter J. McNerney and Panayiotis G. Georgiou and Ronakkumar Patel},
  url       = {http://ict.usc.edu/pubs/How%20to%20Talk%20to%20a%20Hologram.pdf},
  booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {There is a growing need for creating life-like virtual human simulations that can conduct a natural spoken dialog with a human student on a predefined subject. We present an overview of a spoken-dialog system that supports a person interacting with a full-size hologram-like virtual human character in an exhibition kiosk settings. We also give a brief summary of the natural language classification component of the system and describe the experiments we conducted with the system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Mao, Wenji; Marsella, Stacy C.
Modeling Social Emotions and Social Attributions Book Section
In: Sun, R. (Ed.): Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation, Cambridge University Press, 2006.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_modeling_2006,
  title     = {Modeling Social Emotions and Social Attributions},
  author    = {Jonathan Gratch and Wenji Mao and Stacy C. Marsella},
  editor    = {R. Sun},
  url       = {http://ict.usc.edu/pubs/Modeling%20Social%20Emotions%20and%20Social%20Attributions.pdf},
  booktitle = {Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation},
  publisher = {Cambridge University Press},
  year      = {2006},
  date      = {2006-01-01},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
2005
Traum, David; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan
Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_fight_2005,
  title     = {Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis},
  author    = {David Traum and William Swartout and Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Fight,%20Flight,%20or%20Negotiate-%20Believable%20Strategies%20for%20Conversing%20under%20Crisis.pdf},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {This paper describes a model of conversation strategies implemented in virtual humans designed to help people learn negotiation skills. We motivate and discuss these strategies and their use to allow a virtual human to engage in complex adversarial negotiation with a human trainee. Choice of strategy depends on both the personality of the agent and assessment of the likelihood that the negotiation can be beneficial. Execution of strategies can be performed by choosing specific dialogue behaviors such as whether and how to respond to a proposal. Current assessment of the value of the topic, the utility of the strategy, and affiliation toward the other conversants can be used to dynamically change strategies throughout the course of a conversation. Examples will be given from the SASO-ST project, in which a trainee learns to negotiate by interacting with virtual humans who employ these strategies.},
  internal-note = {NOTE(review): abstract restored from a PDF extraction that dropped ligature glyphs (``des ribes'', ``aÆliation'', etc.); spot-check against the published paper.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kallman, Marcelo; Marsella, Stacy C.
Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans Proceedings Article
In: International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kallman_hierarchical_2005,
  title     = {Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans},
  author    = {Marcelo Kallman and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Hierarchical%20Motion%20Controllers%20for%20Real-Time%20Autonomous%20Virtual%20Humans.pdf},
  booktitle = {International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {Continuous and synchronized whole-body motions are essential for achieving believable autonomous virtual humans in interactive applications. We present a new motion control architecture based on generic controllers that can be hierarchically interconnected and reused in real-time. The hierarchical organization implies that leaf controllers are motion generators while the other nodes are connectors, performing operations such as interpolation, blending, and precise scheduling of children controllers. We also describe how the system can correctly handle the synchronization of gestures with speech in order to achieve believable conversational characters. For that purpose, different types of controllers implement a generic model of the different phases of a gesture.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.; Kenny, Patrick G.; Hovy, Eduard; Narayanan, Shrikanth; Fast, Edward; Martinovski, Bilyana; Baghat, Rahul; Robinson, Susan; Marshall, Andrew; Wang, Dagen; Gandhe, Sudeep; Leuski, Anton
Dealing with Doctors: A Virtual Human for Non-team Interaction Proceedings Article
In: 6th SIGdial Conference on Discourse and Dialogue, Lisbon, Portugal, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_dealing_2005,
  title     = {Dealing with Doctors: A Virtual Human for Non-team Interaction},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella and Patrick G. Kenny and Eduard Hovy and Shrikanth Narayanan and Edward Fast and Bilyana Martinovski and Rahul Baghat and Susan Robinson and Andrew Marshall and Dagen Wang and Sudeep Gandhe and Anton Leuski},
  url       = {http://ict.usc.edu/pubs/Dealing%20with%20Doctors.pdf},
  booktitle = {6th SIGdial Conference on Discourse and Dialogue},
  address   = {Lisbon, Portugal},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {We present a virtual human doctor who can engage in multi-modal negotiation dialogue with people from other organizations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
  internal-note = {NOTE(review): abstract restored from a PDF extraction that dropped ligature glyphs (``do tor who an engage''). Author ``Rahul Baghat'' may be a misspelling of ``Rahul Bhagat'' — verify against the published paper before changing.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_evaluating_2005,
  title     = {Evaluating a computational model of emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
  journal   = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
  volume    = {11},
  number    = {1},
  pages     = {23--43},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion of Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
  internal-note = {NOTE(review): page range normalized to BibTeX double-hyphen; abstract's dropped citation join (``model of emotion Gratch and Marsella'') repaired — original likely carried an inline reference marker.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_virtual_2005,
  title     = {Virtual Humans for non-team interaction training},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
  address   = {Utrecht, Netherlands},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ettaile_transonics_2005,
  title     = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
  author    = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
  pages     = {89--92},
  address   = {Ann Arbor, MI},
  year      = {2005},
  date      = {2005-06-01},
  abstract  = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctorpatient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
  internal-note = {NOTE(review): page range normalized to BibTeX double-hyphen. First author's surname ``Ettaile'' may be a misspelling of ``Ettelaie'' — verify against the published ACL paper before changing.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
A Computational Model of Dynamic Perceptual Attention for Virtual Humans Proceedings Article
In: Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation, Universal City, CA, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_computational_2005,
  title     = {A Computational Model of Dynamic Perceptual Attention for Virtual Humans},
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Dynamic%20Perceptual%20Attention%20for%20Virtual%20Humans.pdf},
  booktitle = {Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Universal City, CA},
  year      = {2005},
  date      = {2005-05-01},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to objects and locations in a virtual environment in a manner that looks believable and serves a functional purpose. We have developed a computational model of perceptual attention that mediates top-down and bottom-up attention processes of virtual humans in virtual environments. In this paper, we propose a perceptual attention model that will integrate perceptual attention toward objects and locations in the environment with the need to look at other parties in a social context.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nijholt, Anton; Traum, David
The Virtuality Continuum Revisited Proceedings Article
In: CHI 2005 Workshop on the Virtuality Continuum Revisited, Portland, OR, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nijholt_virtuality_2005,
  title     = {The Virtuality Continuum Revisited},
  author    = {Anton Nijholt and David Traum},
  url       = {http://ict.usc.edu/pubs/The%20Virtuality%20Continuum%20Revisited.pdf},
  booktitle = {CHI 2005 Workshop on the Virtuality Continuum Revisited},
  address   = {Portland, OR},
  year      = {2005},
  date      = {2005-04-01},
  abstract  = {We survey the themes and the aims of a workshop devoted to the state-of-the-art virtuality continuum. In this continuum, ranging from fully virtual to real physical environments, allowing for mixed, augmented and desktop virtual reality, several perspectives can be taken. Originally, the emphasis was on display technologies. Here we take the perspective of the inhabited environment, that is, environments positioned somewhere on this continuum that are inhabited by virtual (embodied) agents, that interact with each other and with their human partners. Hence, we look at it from the multi-party interaction perspective. In this workshop we will investigate the current state of the art, its shortcomings and a future research agenda.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy and its linguistic manifestations Proceedings Article
In: Proceedings of Conference on Formal and Informal Negotiation (FINEXIN), Ottawa, Canada, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_rejection_2005,
  title     = {Rejection of empathy and its linguistic manifestations},
  author    = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20and%20its%20linguistic%20manifestations.pdf},
  booktitle = {Proceedings of Conference on Formal and Informal Negotiation (FINEXIN)},
  address   = {Ottawa, Canada},
  year      = {2005},
  date      = {2005-01-01},
  abstract  = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
  internal-note = {NOTE(review): address corrected from ``Ottowa'' to ``Ottawa''.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Mao, Wenji; Gratch, Jonathan; Marsella, Stacy C.
Mitigation Theory: An Integrated Approach Proceedings Article
In: Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci), Stresa, Italy, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_mitigation_2005,
  title     = {Mitigation Theory: An Integrated Approach},
  author    = {Bilyana Martinovski and Wenji Mao and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Mitigation%20Theory-%20An%20Integrated%20Approach.pdf},
  booktitle = {Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci)},
  address   = {Stresa, Italy},
  year      = {2005},
  date      = {2005-01-01},
  abstract  = {The purpose of this paper is to develop a theoretical model of mitigation by integrating cognitive and discourse approaches to appraisal and coping. Mitigation involves strategic, emotional, linguistic, and Theory of Mind processes on different levels of consciousness. We emphasize that discourse analysis can assist our understanding of these processes.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dialog_2005,
  title     = {Dialog Simulation for Background Characters},
  author    = {Dusan Jan and David Traum},
  url       = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  year      = {2005},
  date      = {2005-01-01},
  abstract  = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters, however we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
  internal-note = {NOTE(review): abstract typo ``bhavior'' corrected to ``behavior''.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Natural Behavior of a Listening Agent Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA), pp. 25–36, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{maatman_natural_2005,
  title     = {Natural Behavior of a Listening Agent},
  author    = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Natural%20Behavior%20of%20a%20Listening%20Agent.pdf},
  booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA)},
  pages     = {25--36},
  address   = {Kos, Greece},
  year      = {2005},
  date      = {2005-01-01},
  abstract  = {In contrast to the variety of listening behaviors produced in human-to-human interaction, most virtual agents sit or stand passively when a user speaks. This is a reflection of the fact that although the correct responsive behavior of a listener during a conversation is often related to the semantics, the state of current speech understanding technology is such that semantic information is unavailable until after an utterance is complete. This paper will illustrate that appropriate listening behavior can also be generated by other features of a speaker's behavior that are available in real time such as speech quality, posture shifts and head movements. This paper presents a mapping from these real-time obtainable features of a human speaker to agent listening behaviors.},
  internal-note = {NOTE(review): page range normalized to BibTeX double-hyphen.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating Social Causality and Responsibility Models: An Initial Report Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_evaluating_2005,
  title       = {Evaluating Social Causality and Responsibility Models: An Initial Report},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT-TR-03-2005.pdf},
  number      = {ICT TR 03 2005},
  institution = {University of Southern California Institute for Creative Technologies},
  year        = {2005},
  date        = {2005-01-01},
  abstract    = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich believability and cognitive capabilities of social intelligent agents. In this report, we present a general computational model of social causality and responsibility, and empirical results of a preliminary evaluation of the model in comparison with several other approaches.},
  internal-note = {NOTE(review): removed PDF line-break hyphenation residue from abstract (``sys- tems'', ``capabili- ties'').},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}