Publications
Search
Nazari, Zahra; Gratch, Jonathan
Predictive Models of Malicious Behavior in Human Negotiations Journal Article
In: Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, pp. 855–861, 2016.
@article{nazari_predictive_2016,
title = {Predictive Models of Malicious Behavior in Human Negotiations},
author = {Zahra Nazari and Jonathan Gratch},
url = {http://www.ijcai.org/Proceedings/16/Papers/126.pdf},
year = {2016},
date = {2016-07-01},
journal = {Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence},
pages = {855--861},
abstract = {Human and artificial negotiators must exchange information to find efficient negotiated agreements, but malicious actors could use deception to gain unfair advantage. The misrepresentation game is a game-theoretic formulation of how deceptive actors could gain disproportionate rewards while seeming honest and fair. Previous research proposed a solution to this game but this required restrictive assumptions that might render it inapplicable to realworld settings. Here we evaluate the formalism against a large corpus of human face-to-face negotiations. We confirm that the model captures how dishonest human negotiators win while seeming fair, even in unstructured negotiations. We also show that deceptive negotiators give-off signals of their malicious behavior, providing the opportunity for algorithms to detect and defeat this malicious tactic.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification Journal Article
In: Journal of Artificial General Intelligence, 2016, ISSN: 1946-0163.
@article{rosenbloom_sigma_2016,
  title     = {The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification},
  author    = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
  url       = {http://www.degruyter.com/view/j/jagi.ahead-of-print/jagi-2016-0001/jagi-2016-0001.xml},
  doi       = {10.1515/jagi-2016-0001},
  issn      = {1946-0163},
  year      = {2016},
  date      = {2016-07-01},
  journal   = {Journal of Artificial General Intelligence},
  abstract  = {Sigma (Σ) is a cognitive architecture and system whose development is driven by a combination of four desiderata: grand unification, generic cognition, functional elegance, and sufficient efficiency. Work towards these desiderata is guided by the graphical architecture hypothesis, that key to progress on them is combining what has been learned from over three decades’ worth of separate work on cognitive architectures and graphical models. In this article, these four desiderata are motivated and explained, and then combined with the graphical architecture hypothesis to yield a rationale for the development of Sigma. The current state of the cognitive architecture is then introduced in detail, along with the graphical architecture that sits below it and implements it. Progress in extending Sigma beyond these architectures and towards a full cognitive system is then detailed in terms of both a systematic set of higher level cognitive idioms that have been developed and several virtual humans that are built from combinations of these idioms. Sigma as a whole is then analyzed in terms of how well the progress to date satisfies the desiderata. This article thus provides the first full motivation, presentation and analysis of Sigma, along with a diversity of more specific results that have been generated during its development.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Artstein, Ron; Gainer, Alesia; Georgila, Kallirroi; Leuski, Anton; Shapiro, Ari; Traum, David
New Dimensions in Testimony Demonstration Proceedings Article
In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pp. 32–36, Association for Computational Linguistics, San Diego, California, 2016.
@inproceedings{artstein_new_2016,
title = {New Dimensions in Testimony Demonstration},
author = {Ron Artstein and Alesia Gainer and Kallirroi Georgila and Anton Leuski and Ari Shapiro and David Traum},
url = {http://www.aclweb.org/anthology/N16-3007},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
pages = {32--36},
publisher = {Association for Computational Linguistics},
address = {San Diego, California},
abstract = {New Dimensions in Testimony is a prototype dialogue system that allows users to conduct a conversation with a real person who is not available for conversation in real time. Users talk to a persistent representation of Holocaust survivor Pinchas Gutter on a screen, while a dialogue agent selects appropriate responses to user utterances from a set of pre-recorded video statements, simulating a live conversation. The technology is similar to existing conversational agents, but to our knowledge this is the first system to portray a real person. The demonstration will show the system on a range of screens (from mobile phones to large TVs), and allow users to have individual conversations with Mr. Gutter.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mizukami, Masahiro; Traum, David; Yoshino, Koichiro; Neubig, Graham; Nakamura, Satoshi
Word and Dialogue Act Entrainment Analysis based on User Profile Proceedings Article
In: Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence, Kitakyushu, Japan, 2016.
@inproceedings{mizukami_word_2016,
title = {Word and Dialogue Act Entrainment Analysis based on User Profile},
author = {Masahiro Mizukami and David Traum and Koichiro Yoshino and Graham Neubig and Satoshi Nakamura},
url = {https://kaigi.org/jsai/webprogram/2016/pdf/356.pdf},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence},
address = {Kitakyushu, Japan},
abstract = {Patterns of dialogue act and word selection are observable in dialogue. Entrainment is the factor that might account for these patterns. We test the entrainment hypotheses using the switchboard corpus, comparing speech of different speakers from different parts of the dialogue, but also speech of the same speaker at different points. Our findings replicate previous studies that dialogue participants converge toward each other in word choice, but we also investigate novel measures of entrainment of dialogue act selection, and word choice for specific dialogue acts. These studies inform a design for dialogue systems that would show human-like degrees of entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul
Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture Book Section
In: Integrating Cognitive Architectures into Virtual Character Design, pp. 213 – 237, IGI Global, Hershey, PA, 2016, ISBN: 978-1-5225-0454-2.
@incollection{ustun_towards_2016,
title = {Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture},
author = {Volkan Ustun and Paul Rosenbloom},
url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-0454-2},
isbn = {978-1-5225-0454-2},
year = {2016},
date = {2016-06-01},
booktitle = {Integrating Cognitive Architectures into Virtual Character Design},
pages = {213--237},
publisher = {IGI Global},
address = {Hershey, PA},
abstract = {Realism is required not only for how synthetic characters look but also for how they behave. Many applications, such as simulations, virtual worlds, and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Sigma (Σ) is being built as a computational model of general intelligence with a long-term goal of understanding and replicating the architecture of the mind; i.e., the fixed structure underlying intelligent behavior. Sigma leverages probabilistic graphical models towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of non-modular behavioral models. These ambitions strive for the complete control of synthetic characters that behave as humanly as possible. In this paper, Sigma is introduced along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Swartout, William R.
Virtual Humans as Centaurs: Melding Real and Virtual Book Section
In: Virtual, Augmented and Mixed Reality, vol. 9740, pp. 356–359, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39906-5 978-3-319-39907-2.
@incollection{swartout_virtual_2016,
title = {Virtual Humans as Centaurs: Melding Real and Virtual},
author = {William R. Swartout},
url = {http://link.springer.com/10.1007/978-3-319-39907-2_34},
isbn = {978-3-319-39906-5 978-3-319-39907-2},
year = {2016},
date = {2016-06-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9740},
pages = {356--359},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Centaurs are man-machine teams that can work together on problems and can out-perform either people or computers working alone in domains as varied as chess-playing and protein folding. But the centaur of Greek mythology was not a team, but rather a hybrid of man and horse with some of the characteristics of each. In this paper, we outline our efforts to build virtual humans, which might be considered hybrid centaurs, combining features of both people and machines. We discuss experimental evidence that shows that these virtual human hybrids can outperform both people and inanimate processes in some tasks such as medical interviewing.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
title = {Learning Representations of Affect from Speech},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://arxiv.org/pdf/1511.04747.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {ICLR 2016},
publisher = {ICLR},
address = {San Juan, Puerto Rico},
abstract = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{collins_towards_2016,
title = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
author = {Kathryn J. Collins and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {118--124},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions - each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121--129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Wörtwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety Proceedings Article
In: Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation, pp. 488–495, European Language Resources Association, Portoroz, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{chollet_multimodal_2016,
title = {A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety},
author = {Mathieu Chollet and Torsten Wörtwein and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/599_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation},
pages = {488--495},
publisher = {European Language Resources Association},
address = {Portoroz, Slovenia},
abstract = {The ability to efficiently speak in public is an essential asset for many professions and is used in everyday life. As such, tools enabling the improvement of public speaking performance and the assessment and mitigation of anxiety related to public speaking would be very useful. Multimodal interaction technologies, such as computer vision and embodied conversational agents, have recently been investigated for the training and assessment of interpersonal skills. One central requirement for these technologies is multimodal corpora for training machine learning models. This paper addresses the need of these technologies by presenting and sharing a multimodal corpus of public speaking presentations. These presentations were collected in an experimental study investigating the potential of interactive virtual audiences for public speaking training. This corpus includes audio-visual data and automatically extracted features, measures of public speaking anxiety and personality, annotations of participants’ behaviors and expert ratings of behavioral aspects and overall performance of the presenters. We hope this corpus will help other research teams in developing tools for supporting public speaking training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
“Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 949–956, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{de_melo_as_2016,
title = {“Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937063},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {949--956},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {There has been growing interest, across various domains, in computer agents that can decide on behalf of humans. These agents have the potential to save considerable time and help humans reach better decisions. One implicit assumption, however, is that, as long as the algorithms that simulate decision-making are correct and capture how humans make decisions, humans will treat these agents similarly to other humans. Here we show that interaction with agents that act on our behalf or on behalf of others is richer and more interesting than initially expected. Our results show that, on the one hand, people are more selfish with agents acting on behalf of others, than when interacting directly with others. We propose that agents increase the social distance with others which, subsequently, leads to increased demand. On the other hand, when people task an agent to interact with others, people show more concern for fairness than when interacting directly with others. In this case, higher psychological distance leads people to consider their social image and the long-term consequences of their actions and, thus, behave more fairly. To support these findings, we present an experiment where people engaged in the ultimatum game, either directly or via an agent, with others or agents representing others. We show that these patterns of behavior also occur in a variant of the ultimatum game – the impunity game – where others have minimal power over the final outcome. Finally, we study how social value orientation – i.e., people’s propensity for cooperation – impact these effects. These results have important implications for our understanding of the psychological mechanisms underlying interaction with agents, as well as practical implications for the design of successful agents that act on our behalf or on behalf of others.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741--2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{nasihati_gilani_what_2016,
title = {What Kind of Stories Should a Virtual Human Swap?},
author = {Setareh Nasihati Gilani and Kraig Sheetz and Gale Lucas and David Traum},
url = {http://dl.acm.org/citation.cfm?id=2937198},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {1437--1438},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped when one conversational participant will reply to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, there are a number of considerations that come up about what kinds of stories should be told, and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of "ice-breaker" questions, that might be used on a first date or similar "get to know you" encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to try to explore the collective impact of above principles on people who interact with the characters. Participants interact with two of the above characters in a "get to know you" scenario. We investigate the degree of reciprocity where people respond to the character with their own stories, and also compare rapport of participants with the characters as well as the impressions of the character's personality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
People Don’t Feel Guilty About Exploiting Machines Journal Article
In: ACM Transactions on Computer-Human Interaction (TOCHI), vol. 23, no. 2, pp. 1–17, 2016, ISSN: 1073-0516.
@article{melo_people_2016,
title = {People Don’t Feel Guilty About Exploiting Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2890495},
doi = {10.1145/2890495},
issn = {1073-0516},
year = {2016},
date = {2016-05-01},
journal = {ACM Transactions on Computer-Human Interaction (TOCHI)},
volume = {23},
number = {2},
pages = {1--17},
abstract = {Guilt and envy play an important role in social interaction. Guilt occurs when individuals cause harm to others or break social norms. Envy occurs when individuals compare themselves unfavorably to others and desire to benefit from the others’ advantage. In both cases, these emotions motivate people to act and change the status quo: following guilt, people try to make amends for the perceived transgression and, following envy, people try to harm envied others. In this paper, we present two experiments that study participants' experience of guilt and envy when engaging in social decision making with machines and humans. The results showed that, though experiencing the same level of envy, people felt considerably less guilt with machines than with humans. These effects occurred both with subjective and behavioral measures of guilt and envy, and in three different economic games: public goods, ultimatum, and dictator game. This poses an important challenge for human-computer interaction because, as shown here, it leads people to systematically exploit machines, when compared to humans. We discuss theoretical and practical implications for the design of human-machine interaction systems that hope to achieve the kind of efficiency – cooperation, fairness, reciprocity, etc. – we see in human-human interaction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Konovalov, Vasily; Artstein, Ron; Melamud, Oren; Dagan, Ido
The Negochat Corpus of Human-agent Negotiation Dialogues Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 3141–3145, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{konovalov_negochat_2016,
title = {The Negochat Corpus of Human-agent Negotiation Dialogues},
author = {Vasily Konovalov and Ron Artstein and Oren Melamud and Ido Dagan},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/240_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {3141--3145},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {Annotated in-domain corpora are crucial to the successful development of dialogue systems of automated agents, and in particular for developing natural language understanding (NLU) components of such systems. Unfortunately, such important resources are scarce. In this work, we introduce an annotated natural language human-agent dialogue corpus in the negotiation domain. The corpus was collected using Amazon Mechanical Turk following the ‘Wizard-Of-Oz’ approach, where a ‘wizard’ human translates the participants’ natural language utterances in real time into a semantic language. Once dialogue collection was completed, utterances were annotated with intent labels by two independent annotators, achieving high inter-annotator agreement. Our initial experiments with an SVM classifier show that automatically inferring such labels from the utterances is far from trivial. We make our corpus publicly available to serve as an aid in the development of dialogue systems for negotiation agents, and suggest that analogous corpora can be created following our methodology and using our available source code. To the best of our knowledge this is the first publicly available negotiation dialogue corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zarrieß, Sina; Hough, Julian; Kennington, Casey; Manuvinakurike, Ramesh; DeVault, David; Fernández, Raquel; Schlangen, David
PentoRef: A Corpus of Spoken References in Task-oriented Dialogues Proceedings Article
In: 10th edition of the Language Resources and Evaluation Conference, ELRA, Portorož, Slovenia, 2016.
@inproceedings{zarrieb_pentoref_2016,
title = {PentoRef: A Corpus of Spoken References in Task-oriented Dialogues},
author = {Sina Zarrieß and Julian Hough and Casey Kennington and Ramesh Manuvinakurike and David DeVault and Raquel Fernández and David Schlangen},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/563_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {10th edition of the Language Resources and Evaluation Conference},
publisher = {ELRA},
address = {Portorož, Slovenia},
abstract = {PentoRef is a corpus of task-oriented dialogues collected in systematically manipulated settings. The corpus is multilingual, with English and German sections, and overall comprises more than 20000 utterances. The dialogues are fully transcribed and annotated with referring expressions mapped to objects in corresponding visual scenes, which makes the corpus a rich resource for research on spoken referring expressions in generation and resolution. The corpus includes several sub-corpora that correspond to different dialogue situations where parameters related to interactivity, visual access, and verbal channel have been manipulated in systematic ways. The corpus thus lends itself to very targeted studies of reference in spontaneous dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Rodriguez, Kepa; Poesio, Massimo
ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 2058–2062, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{uryupina_arrau_2016,
  title     = {{ARRAU}: Linguistically-Motivated Annotation of Anaphoric Descriptions},
  author    = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Kepa Rodriguez and Massimo Poesio},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/1121.html},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
  pages     = {2058–2062},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Portorož, Slovenia},
  abstract  = {This paper presents a second release of the ARRAU dataset: a multi-domain corpus with thorough linguistically motivated annotation of anaphora and related phenomena. Building upon the first release almost a decade ago, a considerable effort had been invested in improving the data both quantitatively and qualitatively. Thus, we have doubled the corpus size, expanded the selection of covered phenomena to include referentiality and genericity and designed and implemented a methodology for enforcing the consistency of the manual annotation. We believe that the new release of ARRAU provides a valuable material for ongoing research in complex cases of coreference as well as for a variety of related tasks. The corpus is publicly available through LDC.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan
IAGO: Interactive Arbitration Guide Online Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1510–1512, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{mell_iago_2016,
  title     = {{IAGO}: Interactive Arbitration Guide Online},
  author    = {Johnathan Mell and Jonathan Gratch},
  url       = {http://dl.acm.org/citation.cfm?id=2937230},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
  pages     = {1510–1512},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Singapore},
  abstract  = {Automated negotiation between two agents has been the subject of much research focused on optimization and efficiency. However, human-agent negotiation represents a field in which real-world considerations can be more fully explored. Furthermore, teaching negotiation and other interpersonal skills requires long periods of practice with open-ended dialogues and partners. The API presented in this paper represents a novel platform on which to conduct human-agent research and facilitate teaching negotiation tactics in a longitudinal way. We present a prototype demonstration that is real-time, rapidly distributable, and allows more actions than current platforms of negotiation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{gratch_misrepresentation_2016,
  title     = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
  author    = {Jonathan Gratch and Zahra Nazari and Emmanuel Johnson},
  url       = {http://dl.acm.org/citation.cfm?id=2937031},
  isbn      = {978-1-4503-4239-1},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
  pages     = {728–737},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Singapore},
  abstract  = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic – the “fixed-pie lie” – that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
  title     = {Designing a Personal Assistant for Life-Long Learning ({PAL3})},
  author    = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
  url       = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
  isbn      = {978-1-57735-756-8},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of The Twenty-Ninth International Flairs Conference},
  pages     = {491–496},
  publisher = {AAAI Press},
  address   = {Key Largo, FL},
  abstract  = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2008
DeVault, David; Traum, David; Artstein, Ron
Practical Grammar-Based NLG from Examples Proceedings Article
In: The Fifth International Natural Language Generation Conference (INLG 2008), Salt Fork, OH, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{devault_practical_2008,
  title     = {Practical Grammar-Based {NLG} from Examples},
  author    = {David DeVault and David Traum and Ron Artstein},
  url       = {http://ict.usc.edu/pubs/Practical%20Grammar-Based%20NLG%20from%20Examples%20.pdf},
  year      = {2008},
  date      = {2008-06-01},
  booktitle = {The Fifth International Natural Language Generation Conference (INLG 2008)},
  address   = {Salt Fork, OH},
  abstract  = {We present a technique that opens up grammar-based generation to a wider range of practical applications by dramatically reducing the development costs and linguistic expertise that are required. Our method infers the grammatical resources needed for generation from a set of declarative examples that link surface expressions directly to the application's available semantic representations. The same examples further serve to optimize a run-time search strategy that generates the best output that can be found within an application-specific time frame. Our method offers substantially lower development costs than hand-crafted grammars for application-specific NLG, while maintaining high output quality and diversity.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
DeVault, David; Traum, David; Artstein, Ron
Making Grammar-Based Generation Easier to Deploy in Dialogue Systems Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, Columbus, OH, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{devault_making_2008,
  title     = {Making Grammar-Based Generation Easier to Deploy in Dialogue Systems},
  author    = {David DeVault and David Traum and Ron Artstein},
  url       = {http://ict.usc.edu/pubs/Making%20Grammar-Based%20Generation%20Easier%20to%20Deploy%20in%20Dialogue%20Systems%20.pdf},
  year      = {2008},
  date      = {2008-06-01},
  booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
  address   = {Columbus, OH},
  abstract  = {We present a development pipeline and associated algorithms designed to make grammar-based generation easier to deploy in implemented dialogue systems. Our approach realizes a practical trade-off between the capabilities of a system's generation component and the authoring and maintenance burdens imposed on the generation content author for a deployed system. To evaluate our approach, we performed a human rating study with system builders who work on a common large-scale spoken dialogue system. Our results demonstrate the viability of our approach and illustrate authoring/performance trade-offs between hand-authored text, our grammar-based approach, and a competing shallow statistical NLG technique.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Pataki, Caroly; Pato, Michele; George, Cheryl St.; Sugar, Jeff; Rizzo, Albert
Virtual Justina: A PTSD Virtual Patient for Clinical Classroom Training Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, pp. 113–118, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2008-2,
  title     = {Virtual Justina: A {PTSD} Virtual Patient for Clinical Classroom Training},
  author    = {Patrick G. Kenny and Thomas D. Parsons and Caroly Pataki and Michele Pato and Cheryl {St. George} and Jeff Sugar and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Virtual%20Justina-%20A%20PTSD%20Virtual%20Patient%20for%20Clinical%20Classroom%20Training.pdf},
  year      = {2008},
  date      = {2008-06-01},
  booktitle = {Annual Review of CyberTherapy and Telemedicine},
  volume    = {6},
  pages     = {113–118},
  abstract  = {The effects of trauma exposure manifest themselves in a wide range of symptoms: anxiety, post-traumatic stress disorder, fear, and various behavior problems. Effective interview skills are a core competency for the clinicians who will be working with children and adolescents exposed to trauma. The current project aims to improve child and adolescent psychiatry residents, and medical students’ interviewing skills and diagnostic acumen through practice with a female adolescent virtual human with post-traumatic stress disorder. This interaction with a virtual patient provides a context where immediate feedback can be provided regarding trainees’ interviewing skills in terms of psychiatric knowledge, sensitivity, and effectiveness. Results suggest that a virtual standardized patient can generate responses that elicit user questions relevant for PTSD categorization. We conclude with a discussion of the ways in which these capabilities allow virtual patients to serve as unique training tools whose special knowledge and reactions can be continually fed back to trainees. Our initial goal is to focus on a virtual patient with PTSD, but a similar strategy could be applied to teaching a broad variety of psychiatric diagnoses to trainees at every level from medical students, to psychiatry residents, to child and adolescent psychiatry residents.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
Degrees of Grounding Based on Evidence of Understanding Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, Columbus, OH, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_degrees_2008,
  author    = {Antonio Roque and David Traum},
  title     = {Degrees of Grounding Based on Evidence of Understanding},
  booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
  address   = {Columbus, OH},
  year      = {2008},
  date      = {2008-06-01},
  url       = {http://ict.usc.edu/pubs/Degrees%20of%20Grounding%20Based%20on%20Evidence%20of%20Understanding.pdf},
  abstract  = {We introduce the Degrees of Grounding model, which defines the extent to which material being discussed in a dialogue has been grounded. This model has been developed and evaluated by a corpus analysis, and includes a set of types of evidence of understanding, a set of degrees of groundedness, a set of grounding criteria, and methods for identifying each of these. We describe how this model can be used for dialogue management.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Thiebaux, Marcus; Marshall, Andrew; Marsella, Stacy C.; Kallmann, Marcelo
SmartBody: Behavior Realization for Embodied Conversational Agents Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Estoril, Portugal, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{thiebaux_smartbody_2008,
  title     = {{SmartBody}: Behavior Realization for Embodied Conversational Agents},
  author    = {Marcus Thiebaux and Andrew Marshall and Stacy C. Marsella and Marcelo Kallmann},
  url       = {http://ict.usc.edu/pubs/SmartBody-%20Behavior%20Realization%20for%20Embodies%20Conversational%20Agents.pdf},
  year      = {2008},
  date      = {2008-05-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Estoril, Portugal},
  abstract  = {Researchers demand much from their embodied conversational agents (ECA), requiring them to be both life-like, as well as responsive to events in an interactive setting. We find that a flexible combination of animation approaches may be needed to satisfy these needs. In this paper we present SmartBody, an open source modular framework for animating ECAs in real time, based on the notion of hierarchically connected animation controllers. Controllers in SmartBody can employ arbitrary animation algorithms such as keyframe interpolation, motion capture or procedural animation. Controllers can also schedule or combine other controllers. We discuss our architecture in detail, including how we incorporate traditional approaches, and develop the notion of a controller as a reactive module within a generic framework, for realizing modular animation control. To illustrate the versatility of the architecture, we also discuss a range of applications that have used SmartBody successfully.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robinson, Susan; Traum, David; Ittycheriah, Midhun; Henderer, Joe
What would you ask a Conversational Agent? Observations of Human-Agent Dialogues in a Museum Setting Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_what_2008,
  title     = {What would you ask a Conversational Agent? Observations of Human-Agent Dialogues in a Museum Setting},
  author    = {Susan Robinson and David Traum and Midhun Ittycheriah and Joe Henderer},
  url       = {http://ict.usc.edu/pubs/What%20would%20you%20ask%20a%20conversational%20agent.pdf},
  year      = {2008},
  date      = {2008-05-01},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Marrakech, Morocco},
  abstract  = {Embodied Conversational Agents have typically been constructed for use in limited domain applications, and tested in very specialized environments. Only in recent years have there been more cases of moving agents into wider public applications (e.g. Bell et al., 2003; Kopp et al., 2005). Yet little analysis has been done to determine the differing needs, expectations, and behavior of human users in these environments. With an increasing trend for virtual characters to “go public”, we need to expand our understanding of what this entails for the design and capabilities of our characters. This paper explores these issues through an analysis of a corpus that has been collected since December 2006, from interactions with the virtual character Sgt Blackwell at the Cooper Hewitt Museum in New York. The analysis includes 82 hierarchical categories of user utterances, as well as specific observations on user preferences and behaviors drawn from interactions with Blackwell.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Russ, Thomas; Traum, David; Hovy, Eduard; Robinson, Susan
A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hartholt_common_2008,
  author    = {Arno Hartholt and Thomas Russ and David Traum and Eduard Hovy and Susan Robinson},
  title     = {A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Marrakech, Morocco},
  year      = {2008},
  date      = {2008-05-01},
  url       = {http://ict.usc.edu/pubs/A%20Common%20Ground%20for%20Virtual%20Humans-%20Using%20an%20Ontology%20in%20a%20Natural%20Language%20Oriented%20Virtual%20Human%20Architecture.pdf},
  abstract  = {When dealing with large, distributed systems that use state-of-the-art components, individual components are usually developed in parallel. As development continues, the decoupling invariably leads to a mismatch between how these components internally represent concepts and how they communicate these representations to other components: representations can get out of synch, contain localized errors, or become manageable only by a small group of experts for each module. In this paper, we describe the use of an ontology as part of a complex distributed virtual human architecture in order to enable better communication between modules while improving the overall flexibility needed to change or extend the system. We focus on the natural language understanding capabilities of this architecture and the relationship between language and concepts within the entire system in general and the ontology in particular.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lee, Jina; DeVault, David; Marsella, Stacy C.; Traum, David
Thoughts on FML: Behavior Generation in the Virtual Human Communication Architecture Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) First Functional Markup Language Workshop, Estoril, Portugal, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{lee_thoughts_2008,
  title     = {Thoughts on {FML}: Behavior Generation in the Virtual Human Communication Architecture},
  author    = {Jina Lee and David DeVault and Stacy C. Marsella and David Traum},
  url       = {http://ict.usc.edu/pubs/Thoughts%20on%20FML.pdf},
  year      = {2008},
  date      = {2008-05-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) First Functional Markup Language Workshop},
  address   = {Estoril, Portugal},
  abstract  = {We discuss our current architecture for the generation of natural language and non-verbal behavior in ICT virtual humans. We draw on our experience developing this architecture to present our current perspective on several issues related to the standardization of FML and to the SAIBA framework more generally. In particular, we discuss our current use, and non-use, of FML-inspired representations in generating natural language, eye gaze, and emotional displays. We also comment on some of the shortcomings of our design as currently implemented.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Artstein, Ron; Gandhe, Sudeep; Leuski, Anton; Traum, David
Field Testing of an Interactive Question-Answering Character Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{artstein_field_2008,
  author    = {Ron Artstein and Sudeep Gandhe and Anton Leuski and David Traum},
  title     = {Field Testing of an Interactive Question-Answering Character},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Marrakech, Morocco},
  year      = {2008},
  date      = {2008-05-01},
  url       = {http://ict.usc.edu/pubs/Field%20Testing%20of%20an%20Interactive%20Question-Answering%20Character%20.pdf},
  abstract  = {We tested a life-size embodied question-answering character at a convention where he responded to questions from the audience. The character's responses were then rated for coherence. The ratings, combined with speech transcripts, speech recognition results and the character's responses, allowed us to identify where the character needs to improve, namely in speech recognition and providing off-topic responses.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents Journal Article
In: Lecture Notes in Computer Science, vol. 4930, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{gratch_true_2008-1,
  author    = {Jonathan Gratch},
  title     = {True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents},
  journal   = {Lecture Notes in Computer Science},
  volume    = {4930},
  year      = {2008},
  date      = {2008-01-01},
  url       = {http://ict.usc.edu/pubs/True%20Emotion%20vs%20Social%20Intentions%20in%20Nonverbal%20Communication-%20Towards%20a%20Synthesis%20for%20Embodied%20Conversational%20Agents.pdf},
  abstract  = {Does a facial expression convey privileged information about a person's mental state or is it a communicative act, divorced from "true" beliefs, desires and intentions? This question is often cast as a dichotomy between competing theoretical perspectives. Theorists like Ekman argue for the primacy of emotion as a determinant of nonverbal behavior: emotions "leak" and only indirectly serve social ends. In contrast, theorists such as Fridlund argue for the primacy of social ends in determining nonverbal displays. This dichotomy has worked to divide virtual character research. Whereas there have been advances in modeling emotion, this work is often seen as irrelevant to the generation of communicative behavior. In this chapter, I review current findings on the interpersonal function of emotion. I'll discuss recent developments in Social Appraisal theory as a way to bridge this dichotomy and our attempts to model these functions within the context of embodied conversational agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Carre, David; Levasseur, Marco; Gratch, Jonathan; Jacopin, Eric
Multimodal Toolbox: Analyzing Gestures Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2008, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{carre_multimodal_2008,
  author      = {David Carre and Marco Levasseur and Jonathan Gratch and Eric Jacopin},
  title       = {Multimodal Toolbox: Analyzing Gestures},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 03 2008},
  year        = {2008},
  date        = {2008-01-01},
  url         = {http://ict.usc.edu/pubs/ICT%20TR%2003%202008.pdf},
  abstract    = {Rapport between people and virtual human agents is not limited to just speech. There are many non-verbal behaviors such as gestures or facial expressions that can express feelings or convey a message. One of the challenges in making an agent appear more realistic is to make his non-verbal behaviors appear more natural. To accomplish this, it is essential to find out how and when gestures are performed. In order to determine how gestures are performed, it is necessary to assess different appearances of the same gesture and the mapping between their respective function. To determine when gestures are performed, the key is to find relevant contextual features and their links with gestures, which will lead to the prediction of the moment they should be performed. Finally, both of these issues can now be tackled with the provided toolbox. Preliminary results show that we have some gesture pattern. Beside, we were able, based on contextual features, to predict when the agent should nod his head. Early results appear to show the agent nods at an opportune time. Moreover, this toolbox generalizes the results to other kind of gestures than head nods, which is the goal of this study.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
A Virtual Human Dialogue Model for Non-team Interaction Book Section
In: Recent Trends in Discourse and Dialogue, vol. 39, pp. 45–67, Springer, Dordecht, The Netherlands, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{traum_virtual_2008,
  title     = {A Virtual Human Dialogue Model for Non-team Interaction},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Dialogue%20Model%20for%20Non-team%20Interaction.pdf},
  year      = {2008},
  date      = {2008-01-01},
  booktitle = {Recent Trends in Discourse and Dialogue},
  volume    = {39},
  pages     = {45–67},
  publisher = {Springer},
  address   = {Dordrecht, The Netherlands},
  series    = {Text, Speech and Language Technology},
  abstract  = {We describe the dialogue model for the virtual humans developed at the Institute for Creative Technologies at the University of Southern California. The dialogue model contains a rich set of information state and dialogue moves to allow a wide range of behaviour in multimodal, multiparty interaction. We extend this model to enable non-team negotiation, using ideas from social science literature on negotiation and implemented strategies and dialogue moves for this area. We present a virtual human doctor who uses this model to engage in multimodal negotiation dialogue with people from other organisations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Kang, Sin-Hwa; Gratch, Jonathan; Wang, Ning; Watt, James H.
Agreeable People Like Agreeable Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 253–261, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kang_agreeable_2008,
  author    = {Sin-Hwa Kang and Jonathan Gratch and Ning Wang and James H. Watt},
  title     = {Agreeable People Like Agreeable Virtual Humans},
  booktitle = {Lecture Notes in Computer Science},
  pages     = {253–261},
  year      = {2008},
  date      = {2008-01-01},
  url       = {http://ict.usc.edu/pubs/Agreeable%20People%20Like%20Agreeable%20Virtual%20Humans.pdf},
  abstract  = {This study explored associations between the five-factor personality traits of human subjects and their feelings of rapport when they interacted with a virtual agent or real humans. The agent, the Rapport Agent, responded to real human speakers' storytelling behavior, using only nonverbal contingent (i.e., timely) feedback. We further investigated how interactants' personalities were related to the three components of rapport: positivity, attentiveness, and coordination. The results revealed that more agreeable people showed strong self-reported rapport and weak behavioral measured rapport in the disfluency dimension when they interacted with the Rapport Agent, while showing no significant associations between agreeableness and self-reported rapport, nor between agreeableness and the disfluency dimension when they interacted with real humans. The conclusions provide fundamental data to further develop a rapport theory that would contribute to evaluating and enhancing the interactional fidelity of an agent on the design of virtual humans for social skills training and therapy.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; Perlman, Karen; McLay, Robert N.; Rothbaum, Barbara O.; Reger, Greg; Parsons, Thomas D.; Difede, JoAnn; Pair, Jarrell
Virtual Iraq: Initial Results from a VR Exposure Therapy Application for Combat-Related PTSD Journal Article
In: Medicine Meets Virtual Reality, vol. 16, pp. 420–425, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_virtual_2008,
  title     = {{Virtual Iraq}: Initial Results from a {VR} Exposure Therapy Application for Combat-Related {PTSD}},
  author    = {Albert Rizzo and Ken Graap and Karen Perlman and Robert N. McLay and Barbara O. Rothbaum and Greg Reger and Thomas D. Parsons and JoAnn Difede and Jarrell Pair},
  url       = {http://ict.usc.edu/pubs/Virtual%20Iraq-%20Initial%20Results%20from%20a%20VR%20Exposure%20Therapy%20Application%20for%20Combat-Related%20PTSD.pdf},
  year      = {2008},
  date      = {2008-01-01},
  journal   = {Medicine Meets Virtual Reality},
  volume    = {16},
  pages     = {420–425},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale and brief description of a Virtual Iraq PTSD VR therapy application and present initial findings from its use with PTSD patients. Thus far, Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Ft. Lewis, Camp Pendleton, Emory University, Weill Cornell Medical College, Walter Reed Army Medical Center, San Diego Naval Medical Center and 12 other sites.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Novielli, Nicole; Carnevale, Peter; Gratch, Jonathan
Cooperation Attitude in Negotiation Dialogs Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC) Workshop on Corpora for Research on Emotion and Affect, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{novielli_cooperation_2008,
  title     = {Cooperation Attitude in Negotiation Dialogs},
  author    = {Nicole Novielli and Peter Carnevale and Jonathan Gratch},
  booktitle = {International Conference on Language Resources and Evaluation (LREC) Workshop on Corpora for Research on Emotion and Affect},
  year      = {2008},
  date      = {2008-01-01},
  url       = {http://ict.usc.edu/pubs/Cooperation%20Attitude%20in%20Negotiation%20Dialogs.pdf},
  abstract  = {We propose an annotation scheme for a corpus of negotiation dialogs that was collected in the scope of a study about the effect of negotiation attitudes and time pressure on dialog patterns.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Evaluation of Justina: A Virtual Patient with PTSD Proceedings Article
In: Lecture Notes in Computer Science, pp. 394–408, 2008.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_evaluation_2008,
title = {Evaluation of {Justina}: A Virtual Patient with {PTSD}},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Justina-%20A%20Virtual%20Patient%20with%20PTSD.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Lecture Notes in Computer Science},
pages = {394--408},
abstract = {Recent research has established the potential for virtual characters to act as virtual standardized patients VP for the assessment and training of novice clinicians. We hypothesize that the responses of a VP simulating Post Traumatic Stress Disorder (PTSD) in an adolescent female could elicit a number of diagnostic mental health specific questions (from novice clinicians) that are necessary for differential diagnosis of the condition. Composites were developed to reflect the relation between novice clinician questions and VP responses. The primary goal in this study was evaluative: can a VP generate responses that elicit user questions relevant for PTSD categorization? A secondary goal was to investigate the impact of psychological variables upon the resulting VP Question/Response composites and the overall believability of the system.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Gratch, Jonathan; Wang, Ning; Watt, James H.
Does the Contingency of Agents' Nonverbal Feedback Affect Users' Social Anxiety? Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 120–127, Estoril, Portugal, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kang_does_2008,
title = {Does the Contingency of Agents' Nonverbal Feedback Affect Users' Social Anxiety?},
author = {Sin-Hwa Kang and Jonathan Gratch and Ning Wang and James H. Watt},
url = {http://ict.usc.edu/pubs/Does%20the%20Contingency%20of%20Agents%20Nonverbal%20Feedback%20Affect%20Users%20Social%20Anxiety.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {120--127},
address = {Estoril, Portugal},
abstract = {We explored the association between users' social anxiety and the interactional fidelity of an agent (also referred to as a virtual human), specifically addressing whether the contingency of agents' nonverbal feedback affects the relationship between users' social anxiety and their feelings of rapport, performance, or judgment on interaction partners. This subject was examined across four experimental conditions where participants interacted with three different types of agents and a real human. The three types of agents included the Non-Contingent Agent, the Responsive Agent (opposite to the Non-Contingent Agent), and the Mediated Agent (controlled by a real human). The results indicated that people having greater social anxiety would feel less rapport and show worse performance while feeling more embarrassment if they experience the untimely feedback of the Non-Contingent Agent. The results also showed people having more anxiety would trust real humans less as their interaction partners. We discuss the implication of this relationship between social anxiety in a human subject and the interactional fidelity of an agent on the design of virtual characters for social skills training and therapy.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Kok, Iwan; Gratch, Jonathan
Context-based Recognition during Human Interactions: Automatic Feature Selection and Encoding Dictionary Proceedings Article
In: 10th International Conference on Multimodal Interfaces (ICMI 2008), 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{morency_context-based_2008,
title = {Context-based Recognition during Human Interactions: Automatic Feature Selection and Encoding Dictionary},
author = {Louis-Philippe Morency and Iwan Kok and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Context-based%20Recognition%20during%20Human%20Interactions-%20Automatic%20Feature%20Selection%20and%20Encoding%20Dictionary.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {10th International Conference on Multimodal Interfaces (ICMI 2008)},
abstract = {During face-to-face conversation, people use visual feedback such as head nods to communicate relevant information and to synchronize rhythm between participants. In this paper we describe how contextual information from other participants can be used to predict visual feedback and improve recognition of head gestures in human-human interactions. The main challenges addressed in this paper are optimal feature representation using an encoding dictionary and automatic selection of the optimal feature-encoding pairs. We evaluate our approach on a dataset involving 78 human participants. Using a discriminative approach to multi-modal integration, our context-based recognizer significantly improves head gesture recognition performance over a vision-only recognizer.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents Proceedings Article
In: Modeling Communication with Robots and Virtual Humans, pp. 181–197, 2008.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_true_2008,
title = {True Emotion vs. Social Intentions in Nonverbal Communication: Towards a Synthesis for Embodied Conversational Agents},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/True%20Emotion%20vs%20Social%20Intentions%20in%20Nonverbal%20Communication-%20Towards%20a%20Synthesis%20for%20Embodied%20Conversational%20Agents.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Modeling Communication with Robots and Virtual Humans},
pages = {181--197},
abstract = {Does a facial expression convey privileged information about a person's mental state or is it a communicative act, divorced from "true" beliefs, desires and intentions? This question is often cast as a dichotomy between competing theoretical perspectives. Theorists like Ekman argue for the primacy of emotion as a determinant of nonverbal behavior: emotions "leak" and only indirectly serve social ends. In contrast, theorists such as Fridlund argue for the primacy of social ends in determining nonverbal displays. This dichotomy has worked to divide virtual character research. Whereas there have been advances in modeling emotion, this work is often seen as irrelevant to the generation of communicative behavior. In this chapter, I review current findings on the interpersonal function of emotion. I'll discuss recent developments in Social Appraisal theory as a way to bridge this dichotomy and our attempts to model these functions within the context of embodied conversational agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2007
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Swartout, William; Traum, David; Marsella, Stacy C.; Piepol, Diane
Building Interactive Virtual Humans for Training Environments Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_building_2007,
  title     = {Building Interactive Virtual Humans for Training Environments},
  author    = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and William Swartout and David Traum and Stacy C. Marsella and Diane Piepol},
  booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
  address   = {Orlando, FL},
  year      = {2007},
  date      = {2007-11-01},
  url       = {http://ict.usc.edu/pubs/Building%20Interactive%20Virtual%20Humans%20for%20Training%20Environments.pdf},
  abstract  = {There is a great need in the Joint Forces to have human to human interpersonal training for skills such as negotiation, leadership, interviewing and cultural training. Virtual environments can be incredible training tools if used properly and used for the correct training application. Virtual environments have already been very successful in training Warfighters how to operate vehicles and weapons systems. At the Institute for Creative Technologies (ICT) we have been exploring a new question: can virtual environments be used to train Warfighters in interpersonal skills such as negotiation, tactical questioning and leadership that are so critical for success in the contemporary operating environment? Using embodied conversational agents to create this type of training system has been one of the goals of the Virtual Humans project at the institute. ICT has a great deal of experience building complex, integrated and immersive training systems that address the human factor needs for training experiences. This paper will address the research, technology and value of developing virtual humans for training environments. This research includes speech recognition, natural language understanding & generation, dialogue management, cognitive agents, emotion modeling, question response managers, speech generation and non-verbal behavior. Also addressed will be the diverse set of training environments we have developed for the system, from single computer laptops to multi-computer immersive displays to real and virtual integrated environments. This paper will also discuss the problems, issues and solutions we encountered while building these systems. The paper will recount subject testing we have performed in these environments and results we have obtained from users. Finally the future of this type of Virtual Humans technology and training applications will be discussed.},
  keywords  = {MedVR, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jonsdottir, Gudny Ragna; Gratch, Jonathan; Fast, Edward; Thórisson, Kristinn R.
Fluid Semantic Back-Channel Feedback in Dialogue: Challenges & Progress Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jonsdottir_fluid_2007,
title = {Fluid Semantic Back-Channel Feedback in Dialogue: Challenges \& Progress},
author = {Gudny Ragna Jonsdottir and Jonathan Gratch and Edward Fast and Kristinn R. Thórisson},
url = {http://ict.usc.edu/pubs/Fluid%20Semantic%20Back-Channel%20Feedback%20in%20Dialogue-%20Challenges%20&%20Progress.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
address = {Paris, France},
abstract = {Participation in natural, real-time dialogue calls for behaviors supported by perception-action cycles from around 100 msec and up. Generating certain kinds of such behaviors, namely envelope feedback, has been possible since the early 90s. Real-time backchannel feedback related to the content of a dialogue has been more difficult to achieve. In this paper we describe our progress in allowing virtual humans to give rapid within-utterance content-specific feedback in real-time dialogue. We present results from human-subject studies of content feedback, where results show that content feedback to a particular phrase or word in human-human dialogue comes 560-2500 msec from the phrase's onset, 1 second on average. We also describe a system that produces such feedback with an autonomous agent in limited topic domains, present performance data of this agent in human-agent interactions experiments and discuss technical challenges in light of the observed human-subject data.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Roque, Antonio; Leuski, Anton; Georgiou, Panayiotis G.; Gerten, Jillian; Martinovski, Bilyana; Narayanan, Shrikanth; Robinson, Susan; Vaswani, Ashish
Hassan: A Virtual Human for Tactical Questioning Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_hassan_2007,
title = {Hassan: A Virtual Human for Tactical Questioning},
author = {David Traum and Antonio Roque and Anton Leuski and Panayiotis G. Georgiou and Jillian Gerten and Bilyana Martinovski and Shrikanth Narayanan and Susan Robinson and Ashish Vaswani},
url = {http://ict.usc.edu/pubs/Hassan-%20A%20Virtual%20Human%20for%20Tactical%20Questioning%20.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
address = {Antwerp, Belgium},
abstract = {We present Hassan, a virtual human who engages in Tactical Questioning dialogues. We describe the tactical questioning domain, the motivation for this character, the specific architecture and present brief examples and an evaluation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Oh, Sejin; Gratch, Jonathan; Woo, Woontack
Explanatory Style for Socially Interactive Agents Proceedings Article
In: Lecture Notes in Computer Science, Lisbon, Portugal, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{oh_explanatory_2007,
title = {Explanatory Style for Socially Interactive Agents},
author = {Sejin Oh and Jonathan Gratch and Woontack Woo},
url = {http://ict.usc.edu/pubs/Explanatory%20Style%20for%20Socially%20Interactive%20Agents.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Computer Science},
address = {Lisbon, Portugal},
abstract = {Recent years have seen an explosion of interest in computational models of socio-emotional processes, both as a mean to deepen understanding of human behavior and as a mechanism to drive a variety of training and entertainment applications. In contrast with work on emotion, where research groups have developed detailed models of emotional processes, models of personality have emphasized shallow surface behavior. Here, we build on computational appraisal models of emotion to better characterize dispositional differences in how people come to understand social situations. Known as explanatory style, this dispositional factor plays a key role in social interactions and certain socio-emotional disorders, such as depression. Building on appraisal and attribution theories, we model key conceptual variables underlying the explanatory style, and enable agents to exhibit different explanatory tendencies according to their personalities. We describe an interactive virtual environment that uses the model to allow participants to explore individual differences in the explanation of social events, with the goal of encouraging the development of perspective taking and emotion-regulatory skills.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_model_2007,
  title     = {A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents},
  author    = {Antonio Roque and David Traum},
  booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
  address   = {Antwerp, Belgium},
  year      = {2007},
  date      = {2007-09-01},
  url       = {http://ict.usc.edu/pubs/A%20Model%20of%20Compliance%20and%20Emotion%20for%20Potentially%20Adversarial%20Dialogue%20%20Agents.pdf},
  abstract  = {We present a model of compliance, for domains in which a dialogue agent may become adversarial. This model includes a set of emotions and a set of levels of compliance, and strategies for changing these.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ai, Hua; Roque, Antonio; Leuski, Anton; Traum, David
Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System Proceedings Article
In: Proceedings of the 10th Interspeech Conference, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ai_using_2007,
title = {Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System},
author = {Hua Ai and Antonio Roque and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Using%20Information%20State%20to%20Improve%20Dialogue%20Move%20Identification%20in%20a%20Spoken%20Dialogue%20System.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Proceedings of the 10th Interspeech Conference},
address = {Antwerp, Belgium},
abstract = {In this paper we investigate how to improve the performance of a dialogue move and parameter tagger for a task-oriented dialogue system using the information-state approach. We use a corpus of utterances and information states from an implemented system to train and evaluate a tagger, and then evaluate the tagger in an on-line system. Use of information state context is shown to improve performance of the system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
Creating Spoken Dialogue Characters from Corpora without Annotations Proceedings Article
In: Interspeech 2007, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_creating_2007,
  title     = {Creating Spoken Dialogue Characters from Corpora without Annotations},
  author    = {Sudeep Gandhe and David Traum},
  booktitle = {Interspeech 2007},
  address   = {Antwerp, Belgium},
  year      = {2007},
  date      = {2007-08-01},
  url       = {http://ict.usc.edu/pubs/Creating%20Spoken%20Dialogue%20Characters%20from%20Corpora%20without%20Annotations%20.pdf},
  abstract  = {Virtual humans are being used in a number of applications, including simulation-based training, multi-player games, and museum kiosks. Natural language dialogue capabilities are an essential part of their human-like persona. These dialogue systems have a goal of being believable and generally have to operate within the bounds of their restricted domains. Most dialogue systems operate on a dialogue-act level and require extensive annotation efforts. Semantic annotation and rule authoring have long been known as bottlenecks for developing dialogue systems for new domains. In this paper, we investigate several dialogue models for virtual humans that are trained on an unannotated human-human corpus. These are inspired by information retrieval and work on the surface text level. We evaluate these in text-based and spoken interactions and also against the upper baseline of human-human dialogues.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robinson, Susan; Roque, Antonio; Vaswani, Ashish; Traum, David; Hernandez, Charles; Millspaugh, Bill
Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training Proceedings Article
In: 10th International Pragmatics Conference, Gothenburg, Sweden, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_evaluation_2007,
title = {Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training},
author = {Susan Robinson and Antonio Roque and Ashish Vaswani and David Traum and Charles Hernandez and Bill Millspaugh},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20a%20Spoken%20Dialogue%20System%20for%20Virtual%20Reality%20Call%20for%20Fire%20Training.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {10th International Pragmatics Conference},
address = {Gothenburg, Sweden},
abstract = {We present an evaluation of a spoken dialogue system that engages in dialogues with soldiers training in an immersive Call for Fire (CFF) simulation. We briefly describe aspects of the Joint Fires and Effects Trainer System, and the Radiobot-CFF dialogue system, which can engage in voice communications with a trainee in call for fire dialogues. An experiment is described to judge performance of the Radiobot CFF system compared with human radio operators. Results show that while the current version of the system is not quite at human performance levels, it is already viable for training interaction and as an operator-controller aid.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Traum, David; Marsella, Stacy C.; Swartout, William
The More the Merrier: Multi-Party Negotiation with Virtual Humans Proceedings Article
In: AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence, pp. 1970–1971, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_more_2007,
title = {The More the Merrier: Multi-Party Negotiation with Virtual Humans},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and David Traum and Stacy C. Marsella and William Swartout},
url = {http://ict.usc.edu/pubs/The%20More%20the%20Merrier-%20Multi-Party%20Negotiation%20with%20Virtual%20Humans.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence},
volume = {2},
pages = {1970--1971},
abstract = {The goal of the Virtual Humans Project at the University of Southern California's Institute for Creative Technologies is to enrich virtual training environments with virtual humans -- autonomous agents that support face-to-face interaction with trainees in a variety of roles -- through bringing together many different areas of research including speech recognition, natural language understanding, dialogue management, cognitive modeling, emotion modeling, non-verbal behavior and speech and knowledge management. The demo at AAAI will focus on our work using virtual humans to train negotiation skills. Conference attendees will negotiate with a virtual human doctor and elder to try to move a clinic out of harm's way in single and multi-party negotiation scenarios using the latest iteration of our Virtual Humans framework. The user will use natural speech to talk to the embodied agents, who will respond in accordance with their internal task model and state. The characters will carry out a multi-party dialogue with verbal and non-verbal behavior. A video of a single-party version of the scenario was shown at AAAI-06. This new interactive demo introduces several new features, including multi-party negotiation, dynamically generated non-verbal behavior and a central ontology.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations Proceedings Article
In: ACL 2007 Workshop on Embodied Language Processing, Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dynamic_2007,
title = {Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dynamic%20Movement%20and%20Positioning%20of%20Embodied%20Agents%20in%20Multiparty%20%20Conversations.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {ACL 2007 Workshop on Embodied Language Processing},
address = {Prague, Czech Republic},
abstract = {For embodied agents to engage in realistic multiparty conversation, they must stand in appropriate places with respect to other agents and the environment. When these factors change, for example when an agent joins a conversation, the agents must dynamically move to a new location and/or orientation to accommodate. This paper presents an algorithm for simulating the movement of agents based on observed human behavior using techniques developed for pedestrian movement in crowd simulations. We extend a previous group conversation simulation to include an agent motion algorithm. We examine several test cases and show how the simulation generates results that mirror real-life conversation settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Rizzo, Albert; Parsons, Thomas D.; Gratch, Jonathan; Swartout, William
A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, Washington D.C., 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007,
title = {A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills},
author = {Patrick G. Kenny and Albert Rizzo and Thomas D. Parsons and Jonathan Gratch and William Swartout},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Agent%20for%20Training%20Novice%20Therapist%20Clinical%20Interviewing%20Skills.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine},
address = {Washington D.C.},
abstract = {Virtual Reality (VR) is rapidly evolving into a pragmatically usable technology for mental health (MH) applications. Over the last five years, the technology for creating virtual humans (VHs) has evolved to the point where they are no longer regarded as simple background characters, but rather can serve a functional interactional role. Our current project involves the construction of a natural language-capable virtual client named ``Justin,'' which derived from a military negotiation training tool into a virtual therapy patient for training novice clinicians the art of clinical interviewing with a resistant client. Justin portrays a 16-year old male with a conduct disorder who is being forced to participate in therapy by his family. The system uses a sophisticated natural language interface that allows novice clinicians to practice asking interview questions in an effort to create a positive therapeutic alliance with this very challenging virtual client. Herein we proffer a description of our iterative design process and outline our long term vision.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Thagard, Paul; Ditto, Peter; Gratch, Jonathan; Marsella, Stacy C.; Westen, Drew
Emotional Cognition in the Real World Proceedings Article
In: Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society, Nashville, TN, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{thagard_emotional_2007,
  title     = {Emotional Cognition in the Real World},
  author    = {Paul Thagard and Peter Ditto and Jonathan Gratch and Stacy C. Marsella and Drew Westen},
  booktitle = {Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society},
  address   = {Nashville, TN},
  year      = {2007},
  date      = {2007-06-01},
  url       = {http://ict.usc.edu/pubs/Emotional%20Cognition%20in%20the%20Real%20World.pdf},
  abstract  = {There is increasing appreciation in cognitive science of the impact of emotions on many kinds of thinking, from decision making to scientific discovery. This appreciation has developed in all the fields of cognitive science, including, psychology, philosophy, artificial intelligence, and linguistics, and anthropology. The purpose of the proposed symposium is to report and discuss new investigations of the impact of emotion on cognitive processes, in particular ones that are important in real life situations. We will approach the practical importance of emotional cognition from a variety of disciplinary perspectives: social psychology (Ditto), clinical psychology (Westen), computer science (Gratch and Marsella), and philosophy and neuroscience (Thagard). In order to provide integration across these approaches, we will try to address a fundamental set of questions, including: 1. How do emotions interact with basic cognitive processes? 2. What are the positive contributions of emotions to various kinds of thinking in real world situations? 3. How do emotions sometimes bias thinking in real world situations? 4. How can understanding of the psychology and neuroscience of emotional cognition be used to improve the effectiveness of real world thinking?},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
The Architectural Role of Emotion in Cognitive Systems Book Section
In: Integrated Models of Cognitive Systems, Oxford University Press, New York, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_architectural_2007,
title = {The Architectural Role of Emotion in Cognitive Systems},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/The%20Architectural%20Role%20of%20Emotion%20in%20Cognitive%20Systems.pdf},
year = {2007},
date = {2007-03-01},
booktitle = {Integrated Models of Cognitive Systems},
publisher = {Oxford University Press},
address = {New York},
abstract = {In this chapter, we will revive an old argument that theories of human emotion can give insight into the design and control of complex cognitive systems. In particular, we claim that appraisal theories of emotion provide essential insight into the influences of emotion over cognition and can help translate such findings into concrete guidance for the design of cognitive systems. Appraisal theory claims that emotion plays a central and functional role in sensing external events, characterizing them as opportunity or threats and recruiting the cognitive, physical and social resources needed to adaptively respond. Further, because it argues for a close association between emotion and cognition, the theoretical claims of appraisal theory can be recast as a requirement specification for how to build a cognitive system. This specification asserts a set of judgments that must be supported in order to correctly interpret and respond to stimuli and provides a unifying framework for integrating these judgments into a coherent physical or social response. This chapter elaborates argument in some detail based on our joint experience in building complex cognitive systems and computational models of emotion.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy in negotiation Journal Article
In: Group Decision and Negotiation, vol. 16, pp. 61–76, 2007, ISSN: 0926-2644.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{martinovski_rejection_2007,
title = {Rejection of empathy in negotiation},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20in%20negotiation.pdf},
issn = {0926-2644},
year = {2007},
date = {2007-01-01},
journal = {Group Decision and Negotiation},
volume = {16},
pages = {61--76},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Jan, Dusan; Herrera, David; Martinovski, Bilyana; Novick, David; Traum, David
A Computational Model of Culture-Specific Conversational Behavior Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_computational_2007,
title = {A Computational Model of Culture-Specific Conversational Behavior},
author = {Dusan Jan and David Herrera and Bilyana Martinovski and David Novick and David Traum},
url = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Culture-Specific%20Conversational%20Behavior.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
address = {Paris, France},
abstract = {This paper presents a model for simulating cultural differences in the conversational behavior of virtual agents. The model provides parameters for differences in proxemics, gaze and overlap in turn taking. We present a review of literature on these factors and show results of a study where native speakers of North American English, Mexican Spanish and Arabic were asked to rate the realism of the simulations generated based on different cultural parameters with respect to their culture.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; Marsella, Stacy C.; Traum, David; Gratch, Jonathan; Lance, Brent
The Rickel Gaze Model: A Window on the Mind of a Virtual Human Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 296–303, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{lee_rickel_2007,
title = {The Rickel Gaze Model: A Window on the Mind of a Virtual Human},
author = {Jina Lee and Stacy C. Marsella and David Traum and Jonathan Gratch and Brent Lance},
url = {http://ict.usc.edu/pubs/The%20Rickel%20Gaze%20Model-%20A%20Window%20on%20the%20Mind%20of%20a%20Virtual%20Human.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {296--303},
address = {Paris, France},
abstract = {Gaze plays a large number of cognitive, communicative and affective roles in face-to-face human interaction. To build a believable virtual human, it is imperative to construct a gaze model that generates realistic gaze behaviors. However, it is not enough to merely imitate a person's eye movements. The gaze behaviors should reflect the internal states of the virtual human and users should be able to derive them by observing the behaviors. In this paper, we present a gaze model driven by the cognitive operations; the model processes the virtual human's reasoning, dialog management, and goals to generate behaviors that reflect the agent's inner thoughts. It has been implemented in our virtual human system and operates in real-time. The gaze model introduced in this paper was originally designed and developed by Jeff Rickel but has since been extended by the authors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Leuski, Anton; Rizzo, Albert
Virtual Patients for Clinical Therapist Skills Training Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 197–210, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007-1,
title = {Virtual Patients for Clinical Therapist Skills Training},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Anton Leuski and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Patients%20for%20Clinical%20Therapist%20Skills%20Training.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {197--210},
address = {Paris, France},
abstract = {Virtual humans offer an exciting and powerful potential for rich interactive experiences. Fully embodied virtual humans are growing in capability, ease, and utility. As a result, they present an opportunity for expanding research into burgeoning virtual patient medical applications. In this paper we consider the ways in which one may go about building and applying virtual human technology to the virtual patient domain. Specifically we aim to show that virtual human technology may be used to help develop the interviewing and diagnostics skills of developing clinicians. Herein we proffer a description of our iterative design process and preliminary results to show that virtual patients may be a useful adjunct to psychotherapy education.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms Proceedings Article
In: Proceedings of the 1st International Conference on Computational Creativity (ICCC-X), pp. 248–257, 2007, ISBN: 978-989-96001-2-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{de_melo_evolving_2007,
title = {Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20through%20Color%20in%20Virtual%20Humans%20using%20Genetic%20Algorithms.pdf},
isbn = {978-989-96001-2-6},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the 1st International Conference on Computational Creativity (ICCC-X)},
pages = {248--257},
abstract = {For centuries artists have been exploring the formal elements of art (lines, space, mass, light, color, sound, etc.) to express emotions. This paper takes this insight to explore new forms of expression for virtual humans which go beyond the usual bodily, facial and vocal expression channels. In particular, the paper focuses on how to use color to influence the perception of emotions in virtual humans. First, a lighting model and filters are used to manipulate color. Next, an evolutionary model, based on genetic algorithms, is developed to learn novel associations between emotions and color. An experiment is then conducted where non-experts evolve mappings for joy and sadness, without being aware that genetic algorithms are used. In a second experiment, the mappings are analyzed with respect to its features and how general they are. Results indicate that the average fitness increases with each new generation, thus suggesting that people are succeeding in creating novel and useful mappings for the emotions. Moreover, the results show consistent differences between the evolved images of joy and the evolved images of sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Wang, Ning; Okhmatovskaia, Anna; Lamothe, Francois; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Can virtual humans be more engaging than real ones? Proceedings Article
In: Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments, pp. 286–297, Beijing, China, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_can_2007,
title = {Can virtual humans be more engaging than real ones?},
author = {Jonathan Gratch and Ning Wang and Anna Okhmatovskaia and Francois Lamothe and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Can%20virtual%20humans%20be%20more%20engaging%20than%20real%20ones.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments},
pages = {286--297},
address = {Beijing, China},
abstract = {Emotional bonds don't arise from a simple exchange of facial displays, but often emerge through the dynamic give and take of face-to-face interactions. This article explores the phenomenon of rapport, a feeling of connectedness that seems to arise from rapid and contingent positive feedback between partners and is often associated with socio-emotional processes. Rapport has been argued to lead to communicative efficiency, better learning outcomes, improved acceptance of medical advice and successful negotiations. We provide experimental evidence that a simple virtual character that provides positive listening feedback can induce stronger rapport-like effects than face-to-face communication between human partners. Specifically, this interaction can be more engaging to storytellers than speaking to a human audience, as measured by the length and content of their stories.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; McLay, Robert N.; Perlman, Karen; Rothbaum, Barbara O.; Reger, Greg; Parsons, Thomas D.; Difede, JoAnn; Pair, Jarrell
Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder Journal Article
In: Virtual Rehabilitation, vol. 27, pp. 124–130, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_virtual_2007,
title = {Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder},
author = {Albert Rizzo and Ken Graap and Robert N. McLay and Karen Perlman and Barbara O. Rothbaum and Greg Reger and Thomas D. Parsons and JoAnn Difede and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Virtual%20Iraq-%20Initial%20Case%20Reports%20from%20a%20VR%20Exposure%20Therapy%20Application%20for%20Combat-Related%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2007},
date = {2007-01-01},
journal = {Virtual Rehabilitation},
volume = {27},
pages = {124--130},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale and brief description of a Virtual Iraq PTSD VR therapy application and present initial findings from two successfully treated patients. The VR treatment environment was created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far, Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center and the results from two successfully treated patients are presented along with a delineation of our future plans for research and clinical care using this application.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Wang, Ning; Gerten, Jillian; Fast, Edward; Duffy, Robin
Creating Rapport with Virtual Agents Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 125–128, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_creating_2007,
title = {Creating Rapport with Virtual Agents},
author = {Jonathan Gratch and Ning Wang and Jillian Gerten and Edward Fast and Robin Duffy},
url = {http://ict.usc.edu/pubs/Creating%20Rapport%20with%20Virtual%20Agents.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {125--128},
address = {Paris, France},
abstract = {Recent research has established the potential for virtual characters to establish rapport with humans through simple contingent nonverbal behaviors. We hypothesized that the contingency, not just the frequency of positive feedback is crucial when it comes to creating rapport. The primary goal in this study was evaluative: can an agent generate behavior that engenders feelings of rapport in human speakers and how does this compare to human generated feedback? A secondary goal was to answer the question: Is contingency (as opposed to frequency) of agent feedback crucial when it comes to creating feelings of rapport? Results suggest that contingency matters when it comes to creating rapport and that agent generated behavior was as good as human listeners in creating rapport. A "virtual human listener" condition performed worse than other conditions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus Proceedings Article
In: 5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems, Hyderabad, India, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_first_2007,
title = {First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20towards%20Dialogue%20Modelling%20from%20an%20Un-annotated%20Human-Human%20Corpus.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems},
address = {Hyderabad, India},
abstract = {Virtual human characters equipped with natural language dialogue capability have proved useful in many fields like simulation training and interactive games. Generally behind such dialogue managers lies a complex knowledge-rich rule-based system. Building such system involves meticulous annotation of data and hand authoring of rules. In this paper we build a statistical dialogue model from roleplay and wizard of oz dialog corpus with virtually no annotation. We compare these methods with the traditional approaches. We have evaluated these systems for perceived appropriateness of response and the results are presented here.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2006
Core, Mark; Traum, David; Lane, H. Chad; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan; Lent, Michael
Teaching Negotiation Skills through Practice and Reflection with Virtual Humans Journal Article
In: Simulation: Transactions of the Society for Modeling and Simulation, vol. 82, no. 11, pp. 685–701, 2006.
Abstract | Links | BibTeX | Tags: Learning Sciences, Social Simulation, Virtual Humans
@article{core_teaching_2006,
title = {Teaching Negotiation Skills through Practice and Reflection with Virtual Humans},
author = {Mark Core and David Traum and H. Chad Lane and William Swartout and Stacy C. Marsella and Jonathan Gratch and Michael Lent},
url = {http://ict.usc.edu/pubs/Teaching%20Negotiation%20Skills.pdf},
year = {2006},
date = {2006-11-01},
journal = {Simulation: Transactions of the Society for Modeling and Simulation},
volume = {82},
number = {11},
pages = {685--701},
abstract = {Although the representation of physical environments and behaviors will continue to play an important role in simulation-based training, an emerging challenge is the representation of virtual humans with rich mental models (e.g., including emotions, trust) that interact through conversational as well as physical behaviors. The motivation for such simulations is training soft skills such as leadership, cultural awareness, and negotiation, where the majority of actions are conversational, and the problem solving involves consideration of the emotions, attitudes, and desires of others. The educational power of such simulations can be enhanced by the integration of an intelligent tutoring system to support learners' understanding of the effect of their actions on virtual humans and how they might improve their performance. In this paper, we discuss our efforts to build such virtual humans, along with an accompanying intelligent tutor, for the domain of negotiation and cultural awareness.},
keywords = {Learning Sciences, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Duncan, Susan
Virtual Humans for the Study of Rapport in Cross Cultural Settings Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_virtual_2006,
title = {Virtual Humans for the Study of Rapport in Cross Cultural Settings},
author = {Jonathan Gratch and Anna Okhmatovskaia and Susan Duncan},
url = {http://ict.usc.edu/pubs/VIRTUAL%20HUMANS%20FOR%20THE%20STUDY%20OF%20RAPPORT%20IN%20CROSS%20CULTURAL%20SETTINGS.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {As an increasing part of the Army's mission involves establishing rapport with diverse populations, training interpersonal skills becomes critically important. Here we describe a "Rapport Agent" that senses and responds to a speaker's nonverbal behavior and provide empirical evidence that it increases speaker fluency and engagement. We argue such agent technology has potential, both as a training system to enhance communication skills, and to assess the key factors that influence rapport in face-to-face interactions. We conclude by discussing ways the nonverbal correlates of rapport vary between Arabic and English speakers and discuss the potential of such technology to advance research and training into rapport in cross-cultural settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Parsons, Thomas D.; Liewer, Matt; Graap, Ken; Difede, JoAnn; Rothbaum, Barbara O.; Reger, Greg; Roy, Michael
A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006-1,
  title     = {A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder},
  author    = {Albert Rizzo and Jarrell Pair and Thomas D. Parsons and Matt Liewer and Ken Graap and JoAnn Difede and Barbara O. Rothbaum and Greg Reger and Michael Roy},
  url       = {http://ict.usc.edu/pubs/A%20VIRTUAL%20REALITY%20THERAPY%20APPLICATION%20FOR%20OEF%20OIF%20COMBAT-RELATED%20POST%20TRAUMATIC%20STRESS%20DISORDER.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Kennedy, Brandon; Patel, Ronakkumar; Traum, David
Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be? Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_asking_2006,
title = {Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be?},
author = {Anton Leuski and Brandon Kennedy and Ronakkumar Patel and David Traum},
url = {http://ict.usc.edu/pubs/Asking%20Questions%20to%20Limited%20Domain%20Virtual%20Characters.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper, we describe the evaluation of limited domain question-answering characters, particularly as to the effect of non-optimal speech recognition, and the ability to appropriately answer novel questions. Results show that answering ability is robust until speech recognition reaches over 60% word error rate.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Julia; Zbylut, Michelle L.; Gordon, Andrew S.; Traum, David; Gandhe, Sudeep; King, Stewart; Lavis, Salvo; Rocher, Scott
AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{hill_axlnet_2006,
title = {AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders},
author = {Randall W. Hill and Julia Kim and Michelle L. Zbylut and Andrew S. Gordon and David Traum and Sudeep Gandhe and Stewart King and Salvo Lavis and Scott Rocher},
url = {http://ict.usc.edu/pubs/AXLNet-%20Web-enabled%20Case%20Method%20Instruction%20for%20Accelerating%20Tacit%20Knowledge%20Acquisition%20in%20Leaders.PDF},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {AXL.Net is a prototype web-based immersive technology solution that supports case method teaching for U.S. Army leader development. The AXL.Net system addresses three challenges: (1) designing a pedagogically sound research prototype for leader development, (2) integrating research technologies with the best of Web 2.0 innovations to enhance case method teaching, and (3) providing an easy to use system. Initial evaluations show that the prototype application and framework is effective for leader development.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Leuski, Anton; Rangarajan, Vivek; Robinson, Susan; Vaswani, Ashish; Narayanan, Shrikanth; Traum, David
Radiobot-CFF: A Spoken Dialogue System for Military Training Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_radiobot-cff_2006,
title = {Radiobot-CFF: A Spoken Dialogue System for Military Training},
author = {Antonio Roque and Anton Leuski and Vivek Rangarajan and Susan Robinson and Ashish Vaswani and Shrikanth Narayanan and David Traum},
url = {http://ict.usc.edu/pubs/Radiobot-CFF-%20A%20Spoken%20Dialogue%20System%20for%20Military%20Training.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {Interspeech 2006},
address = {Pittsburgh, PA},
abstract = {We describe a spoken dialogue system which can engage in Call For Fire (CFF) radio dialogues to help train soldiers in proper procedures for requesting artillery fire missions. We describe the domain, an information-state dialogue manager with a novel system of interactive information components, and provide evaluation results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tepperman, Joseph; Traum, David; Narayanan, Shrikanth
"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{tepperman_yeah_2006,
title = {``Yeah Right'': Sarcasm Recognition for Spoken Dialogue Systems},
author = {Joseph Tepperman and David Traum and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Yeah%20Right-%20Sarcasm%20Recognition%20for%20Spoken%20Dialogue%20Systems.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {Interspeech 2006},
address = {Pittsburgh, PA},
abstract = {The robust understanding of sarcasm in a spoken dialogue system requires a reformulation of the dialogue manager's basic assumptions behind, for example, user behavior and grounding strategies. But automatically detecting a sarcastic tone of voice is not a simple matter. This paper presents some experiments toward sarcasm recognition using prosodic, spectral, and contextual cues. Our results demonstrate that spectral and contextual features can be used to detect sarcasm as well as a human annotator would, and confirm a long-held claim in the field of psychology — that prosody alone is not sufficient to discern whether a speaker is being sarcastic.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Ronakkumar; Leuski, Anton; Traum, David
Dealing with Out of Domain Questions in Virtual Characters Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_dealing_2006,
title = {Dealing with Out of Domain Questions in Virtual Characters},
author = {Ronakkumar Patel and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Dealing%20with%20Out%20of%20Domain%20Questions%20in%20Virtual%20Characters.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents},
address = {Marina del Rey, CA},
abstract = {We consider the problem of designing virtual characters that support speech-based interactions in a limited domain. Previously we have shown that classification can be an effective and robust tool for selecting appropriate in-domain responses. In this paper, we consider the problem of dealing with out-of-domain user questions. We introduce a taxonomy of out-of-domain response types. We consider three classification architectures for selecting the most appropriate out-of-domain responses. We evaluate these architectures and show that they significantly improve the quality of the response selection making the user's interaction with the virtual character more natural and engaging.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gluck, Kevin A.; Gunzelmann, Glenn; Gratch, Jonathan; Hudlicka, Eva; Ritter, Frank E.
Modeling the Impact of Cognitive Moderators on Human Cognition and Performance Proceedings Article
In: Proceedings of the 2006 Conference of the Cognitive Science Society, pp. 2658, Vancouver, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gluck_modeling_2006,
title = {Modeling the Impact of Cognitive Moderators on Human Cognition and Performance},
author = {Kevin A. Gluck and Glenn Gunzelmann and Jonathan Gratch and Eva Hudlicka and Frank E. Ritter},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Impact%20of%20Cognitive%20Moderators%20on%20Human%20Cognition%20and%20Performance.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 2006 Conference of the Cognitive Science Society},
pages = {2658},
address = {Vancouver, CA},
abstract = {Cognitive moderators, such as emotions, personality, stress, and fatigue, represent an emerging area of research within the cognitive science community and are increasingly acknowledged as important and ubiquitous influences on cognitive processes. This symposium brings together scientists engaged in research to develop models that help us better understand the mechanisms through which these factors impact human cognition and performance. There are two unifying themes across the presentations. One theme is a commitment to developing computational models useful for simulating the processes that produce the effects and phenomena of interest. The second theme is a commitment to assessing the validity of the models by comparing their performance against empirical human data.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}