Publications
Search
Gandhe, Sudeep; Traum, David
A Semi-automated Evaluation Metric for Dialogue Model Coherence Book Section
In: Situated Dialog in Speech-Based Human-Computer Interaction, pp. 217–225, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-21833-5 978-3-319-21834-2.
@incollection{gandhe_semi-automated_2016,
title = {A Semi-automated Evaluation Metric for Dialogue Model Coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://link.springer.com/10.1007/978-3-319-21834-2_19},
isbn = {978-3-319-21833-5 978-3-319-21834-2},
year = {2016},
date = {2016-04-01},
booktitle = {Situated Dialog in Speech-Based Human-Computer Interaction},
pages = {217--225},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric Weak agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Rizzo, Albert; Lucas, Gale; Gratch, Jonathan; Stratou, Giota; Morency, Louis-Philippe; Chavez, Kenneth; Shilling, Russ; Scherer, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316--322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from of sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and few happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Georgila, Kallirroi; Pynadath, David V.
Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events Proceedings Article
In: Proceedings of The 29th International FLAIRS Conference, pp. 44–49, AAAI Press, Key Largo, FL, 2016.
@inproceedings{georgila_towards_2016,
title = {Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events},
author = {Kallirroi Georgila and David V. Pynadath},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12960/12539},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of The 29th International {FLAIRS} Conference},
pages = {44--49},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Accurate multiagent social simulation requires a computational model of how people incorporate their observations of real-world events into their beliefs about the state of their world. Current methods for creating such agent-based models typically rely on manual input that can be both burdensome and subjective. In this investigation, we instead pursue automated methods that can translate available data into the desired computational models. For this purpose, we use a corpus of real-world events in combination with longitudinal public opinion polls on a variety of opinion issues. We perform two experiments using automated methods taken from the literature. In our first experiment, we train maximum entropy classifiers to model changes in opinion scores as a function of real-world events. We measure and analyze the accuracy of our learned classifiers by comparing the opinion scores they generate against the opinion scores occurring in a held-out subset of our corpus. In our second experiment, we learn Bayesian networks to capture the same function. We then compare the dependency structures induced by the two methods to identify the event features that have the most significant effect on changes in public opinion.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Silver, Kenneth
Ethics for a Combined Human-Machine Dialogue Agent Proceedings Article
In: Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium, pp. 184–189, AAAI Press, Stanford, California, 2016.
@inproceedings{artstein_ethics_2016,
title = {Ethics for a Combined Human-Machine Dialogue Agent},
author = {Ron Artstein and Kenneth Silver},
url = {http://www.aaai.org/ocs/index.php/SSS/SSS16/paper/viewFile/12706/11948},
year = {2016},
date = {2016-03-01},
booktitle = {Ethical and Moral Considerations in Non-Human Agents: Papers from the {AAAI} Spring Symposium},
pages = {184--189},
publisher = {AAAI Press},
address = {Stanford, California},
abstract = {We discuss philosophical and ethical issues that arise from a dialogue system intended to portray a real person, using recordings of the person together with a machine agent that selects recordings during a synchronous conversation with a user. System output may count as actions of the speaker if the speaker intends to communicate with users and the outputs represent what the speaker would have chosen to say in context; in such cases the system can justifiably be said to be holding a conversation that is offset in time. The autonomous agent may at times misrepresent the speaker’s intentions, and such failures are analogous to good-faith misunderstandings. The user may or may not need to be informed that the speaker is not organically present, depending on the application.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan
Multimodal Behavior Analytics for Interactive Technologies Journal Article
In: KI - Künstliche Intelligenz, vol. 30, no. 1, pp. 91–92, 2016, ISSN: 0933-1875, 1610-1987.
@article{scherer_multimodal_2016,
title = {Multimodal Behavior Analytics for Interactive Technologies},
author = {Stefan Scherer},
url = {https://link.springer.com/article/10.1007/s13218-015-0401-0},
doi = {10.1007/s13218-015-0401-0},
issn = {0933-1875, 1610-1987},
year = {2016},
date = {2016-02-01},
journal = {KI - Künstliche Intelligenz},
volume = {30},
number = {1},
pages = {91--92},
abstract = {Human communication is multifaceted and information between humans is communicated on many channels in parallel. In order for a machine to become an efficient and accepted social companion, it is important that the machine understands interactive cues that not only represent direct communicative information such as spoken words but also nonverbal behavior. Hence, technologies to understand and put nonverbal communication into the context of the present interaction are essential for the advancement of human-machine interfaces [3, 4]. Multimodal behavior analytics—a transdisciplinary field of research—aims to close this gap and enables machines to automatically identify, characterize, model, and synthesize individuals’ multimodal nonverbal behavior within both human-machine as well as machine-mediated humanhuman interaction. The emerging technology of this field is relevant for a wide range of interaction applications, including but not limited to the areas of healthcare and education. Exemplarily, the characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or post-traumatic stress, holds transformative potential and could change treatment and the healthcare systems efficiency significantly [6]. Within the educational context the assessment of proficiency and expertise of individuals’ social skills, in particular for those with learning disabilities or social anxiety, can help create individualized education scenarios [2, 8]. The potential of machine-assisted training for individuals with autism spectrum disorders (ASD) for example could have far reaching impacts on our society. In the following, I highlight two behavior analytics approaches that were investigated in my PhD dissertation [3] and summarized in a multimodal framework for human behavior analysis [4].},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Scherer, Stefan; Lucas, Gale M.; Gratch, Jonathan; Rizzo, Albert Skip; Morency, Louis-Philippe
Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews Journal Article
In: IEEE Transactions on Affective Computing, vol. 7, no. 1, pp. 59–73, 2016, ISSN: 1949-3045.
@article{scherer_self-reported_2016,
title = {Self-reported symptoms of depression and {PTSD} are associated with reduced vowel space in screening interviews},
author = {Stefan Scherer and Gale M. Lucas and Jonathan Gratch and Albert Skip Rizzo and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/document/7117386/?arnumber=7117386},
doi = {10.1109/TAFFC.2015.2440264},
issn = {1949-3045},
year = {2016},
date = {2016-01-01},
journal = {IEEE Transactions on Affective Computing},
volume = {7},
number = {1},
pages = {59--73},
abstract = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals’ with psychological and neurological disorders. Affective disorders such as depression and post-traumatic stress disorder (PTSD) are known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and reduced expressivity often either rely on subjective assessments or on analysis of speech under constrained laboratory conditions (e.g.sustained vowel production, reading tasks). These constraints render the analysis of such measures expensive and impractical. Within this work, we investigate an automatic unsupervised machine learning based approach to assess a speaker’s vowel space. Our experiments are based on recordings of 253 individuals. Symptoms of depression and PTSD are assessed using standard self-assessment questionnaires and their cut-off scores. The experiments show a significantly reduced vowel space in subjects that scored positively on the questionnaires. We show the measure’s statistical robustness against varying demographics of individuals and articulation rate. The reduced vowel space for subjects with symptoms of depression can be explained by the common condition of psychomotor retardation influencing articulation and motor control. These findings could potentially support treatment of affective disorders, like depression and PTSD in the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Venek, Verena; Scherer, Stefan; Morency, Louis-Philippe; Rizzo, Albert; Pestian, John
Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction Journal Article
In: IEEE Transactions on Affective Computing, vol. PP, no. 99, 2016, ISSN: 1949-3045.
@article{venek_adolescent_2016,
  title     = {Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction},
  author    = {Verena Venek and Stefan Scherer and Louis-Philippe Morency and Albert Rizzo and John Pestian},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7384418},
  doi       = {10.1109/TAFFC.2016.2518665},
  issn      = {1949-3045},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {IEEE Transactions on Affective Computing},
  volume    = {PP},
  number    = {99},
  abstract  = {Youth suicide is a major public health problem. It is the third leading cause of death in the United States for ages 13 through 18. Many adolescents that face suicidal thoughts or make a suicide plan never seek professional care or help. Within this work, we evaluate both verbal and nonverbal responses to a five-item ubiquitous questionnaire to identify and assess suicidal risk of adolescents. We utilize a machine learning approach to identify suicidal from non-suicidal speech as well as characterize adolescents that repeatedly attempted suicide in the past. Our findings investigate both verbal and nonverbal behavior information of the face-to-face clinician-patient interaction. We investigate 60 audio-recorded dyadic clinician-patient interviews of 30 suicidal (13 repeaters and 17 non-repeaters) and 30 non-suicidal adolescents. The interaction between clinician and adolescents is statistically analyzed to reveal differences between suicidal vs. non-suicidal adolescents and to investigate suicidal repeaters’ behaviors in comparison to suicidal non-repeaters. By using a hierarchical classifier we were able to show that the verbal responses to the ubiquitous questions sections of the interviews were useful to discriminate suicidal and non-suicidal patients. However, to additionally classify suicidal repeaters and suicidal non-repeaters more information especially nonverbal information is required.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kaplan, Jonas T.; Gimbel, Sarah I.; Dehghani, Morteza; Immordino-Yang, Mary Helen; Sagae, Kenji; Wong, Jennifer D.; Tipper, Christine M.; Damasio, Hanna; Gordon, Andrew S.; Damasio, Antonio
Processing Narratives Concerning Protected Values: A Cross-Cultural Investigation of Neural Correlates Journal Article
In: Cerebral Cortex, 2016, ISSN: 1047-3211, 1460-2199.
@article{kaplan_processing_2016,
  title     = {Processing Narratives Concerning Protected Values: A Cross-Cultural Investigation of Neural Correlates},
  author    = {Jonas T. Kaplan and Sarah I. Gimbel and Morteza Dehghani and Mary Helen Immordino-Yang and Kenji Sagae and Jennifer D. Wong and Christine M. Tipper and Hanna Damasio and Andrew S. Gordon and Antonio Damasio},
  url       = {http://www.cercor.oxfordjournals.org/lookup/doi/10.1093/cercor/bhv325},
  doi       = {10.1093/cercor/bhv325},
  issn      = {1047-3211, 1460-2199},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Cerebral Cortex},
  abstract  = {Narratives are an important component of culture and play a central role in transmitting social values. Little is known, however, about how the brain of a listener/reader processes narratives. A receiver's response to narration is influenced by the narrator's framing and appeal to values. Narratives that appeal to “protected values,” including core personal, national, or religious values, may be particularly effective at influencing receivers. Protected values resist compromise and are tied with identity, affective value, moral decision-making, and other aspects of social cognition. Here, we investigated the neural mechanisms underlying reactions to protected values in narratives. During fMRI scanning, we presented 78 American, Chinese, and Iranian participants with real-life stories distilled from a corpus of over 20 million weblogs. Reading these stories engaged the posterior medial, medial prefrontal, and temporo-parietal cortices. When participants believed that the protagonist was appealing to a protected value, signal in these regions was increased compared with when no protected value was perceived, possibly reflecting the intensive and iterative search required to process this material. The effect strength also varied across groups, potentially reflecting cultural differences in the degree of concern for protected values.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ustun, Volkan; Rosenbloom, Paul S.; Kim, Julia; Li, Lingshan
Building High Fidelity Human Behavior Models in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 2015 Winter Simulation Conference, pp. 3124–3125, IEEE, Huntington Beach, CA, 2015, ISBN: 978-1-4673-9741-4.
@inproceedings{ustun_building_2015,
title = {Building High Fidelity Human Behavior Models in the {Sigma} Cognitive Architecture},
author = {Volkan Ustun and Paul S. Rosenbloom and Julia Kim and Lingshan Li},
url = {http://dl.acm.org/citation.cfm?id=2888619.2888999},
isbn = {978-1-4673-9741-4},
year = {2015},
date = {2015-12-01},
booktitle = {Proceedings of the 2015 Winter Simulation Conference},
pages = {3124--3125},
publisher = {IEEE},
address = {Huntington Beach, CA},
abstract = {Many agent simulations involve computational models of intelligent human behavior. In a variety of cases, these behavior models should be high-fidelity to provide the required realism and credibility. Cognitive architectures may assist the generation of such high-fidelity models as they specify the fixed structure underlying an intelligent cognitive system that does not change over time and across domains. Existing symbolic architectures, such as Soar and ACT-R, have been used in this way, but here the focus is on a new architecture, Sigma ({$\Sigma$}), that leverages probabilistic graphical models towards a uniform grand unification of not only the traditional cognitive capabilities but also key non-cognitive aspects, and which thus yields unique opportunities for construction of new kinds of non-modular high-fidelity behavior models. Here, we briefly introduce Sigma along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Jones, Andrew; Hays, Kia; Maio, Heather; Alexander, Oleg; Artstein, Ron; Debevec, Paul; Gainer, Alesia; Georgila, Kallirroi; Haase, Kathleen; Jungblut, Karen; Leuski, Anton; Smith, Stephen; Swartout, William
New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling Book Section
In: Interactive Storytelling, vol. 9445, pp. 269–281, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27035-7 978-3-319-27036-4.
@incollection{traum_new_2015,
title = {New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling},
author = {David Traum and Andrew Jones and Kia Hays and Heather Maio and Oleg Alexander and Ron Artstein and Paul Debevec and Alesia Gainer and Kallirroi Georgila and Kathleen Haase and Karen Jungblut and Anton Leuski and Stephen Smith and William Swartout},
url = {http://link.springer.com/10.1007/978-3-319-27036-4_26},
isbn = {978-3-319-27035-7 978-3-319-27036-4},
year = {2015},
date = {2015-12-01},
booktitle = {Interactive Storytelling},
volume = {9445},
pages = {269--281},
publisher = {Springer International Publishing},
address = {Copenhagen, Denmark},
abstract = {We describe a digital system that allows people to have an interactive conversation with a human storyteller (a Holocaust survivor) who has recorded a number of dialogue contributions, including many compelling narratives of his experiences and thoughts. The goal is to preserve as much as possible of the experience of face-to-face interaction. The survivor's stories, answers to common questions, and testimony are recorded in high fidelity, and then delivered interactively to an audience as responses to spoken questions. People can ask questions and receive answers on a broad range of topics including the survivor's experiences before, after and during the war, his attitudes and philosophy. Evaluation results show that most user questions can be addressed by the system, and that audiences are highly engaged with the resulting interaction.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Chatterjee, Moitreya; Park, Sunghyun; Morency, Louis-Philippe; Scherer, Stefan
Combining Two Perspectives on Classifying Multimodal Data for Recognizing Speaker Traits Proceedings Article
In: Proceedings of the 2015 ACM on International Conference on Multimodal Interaction, pp. 7–14, ACM Press, Seattle, Washington, 2015, ISBN: 978-1-4503-3912-4.
@inproceedings{chatterjee_combining_2015,
title = {Combining Two Perspectives on Classifying Multimodal Data for Recognizing Speaker Traits},
author = {Moitreya Chatterjee and Sunghyun Park and Louis-Philippe Morency and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2818346.2820747},
doi = {10.1145/2818346.2820747},
isbn = {978-1-4503-3912-4},
year = {2015},
date = {2015-11-01},
booktitle = {Proceedings of the 2015 {ACM} on International Conference on Multimodal Interaction},
pages = {7--14},
publisher = {ACM Press},
address = {Seattle, Washington},
abstract = {Human communication involves conveying messages both through verbal and non-verbal channels (facial expression, gestures, prosody, etc.). Nonetheless, the task of learning these patterns for a computer by combining cues from multiple modalities is challenging because it requires effective representation of the signals and also taking into consideration the complex interactions between them. From the machine learning perspective this presents a two-fold challenge: a) Modeling the intermodal variations and dependencies; b) Representing the data using an apt number of features, such that the necessary patterns are captured but at the same time allaying concerns such as over-fitting. In this work we attempt to address these aspects of multimodal recognition, in the context of recognizing two essential speaker traits, namely passion and credibility of online movie reviewers. We propose a novel ensemble classification approach that combines two different perspectives on classifying multimodal data. Each of these perspectives attempts to independently address the two-fold challenge. In the first, we combine the features from multiple modalities but assume inter-modality conditional independence. In the other one, we explicitly capture the correlation between the modalities but in a space of few dimensions and explore a novel clustering based kernel similarity approach for recognition. Additionally, this work investigates a recent technique for encoding text data that captures semantic similarity of verbal content and preserves word-ordering. The experimental results on a recent public dataset shows significant improvement of our approach over multiple baselines. Finally, we also analyze the most discriminative elements of a speaker's non-verbal behavior that contribute to his/her perceived credibility/passionateness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Casas, Dan; Shapiro, Ari
Avatar Reshaping and Automatic Rigging Using a Deformable Model Proceedings Article
In: Proceedings of the 8th ACM SIGGRAPH Conference on Motion in Games, pp. 57–64, ACM Press, Paris, France, 2015, ISBN: 978-1-4503-3991-9.
@inproceedings{feng_avatar_2015,
title = {Avatar Reshaping and Automatic Rigging Using a Deformable Model},
author = {Andrew Feng and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2822013.2822017},
doi = {10.1145/2822013.2822017},
isbn = {978-1-4503-3991-9},
year = {2015},
date = {2015-11-01},
booktitle = {Proceedings of the 8th {ACM} {SIGGRAPH} Conference on Motion in Games},
pages = {57--64},
publisher = {ACM Press},
address = {Paris, France},
abstract = {3D scans of human figures have become widely available through online marketplaces and have become relatively easy to acquire using commodity scanning hardware. In addition to static uses of such 3D models, such as 3D printed figurines or rendered 3D still imagery, there are numerous uses for an animated 3D character that uses such 3D scan data. In order to effectively use such models as dynamic 3D characters, the models must be properly rigged before they are animated. In this work, we demonstrate a method to automatically rig a 3D mesh by matching a set of morphable models against the 3D scan. Once the morphable model has been matched against the 3D scan, the skeleton position and skinning attributes are then copied, resulting in a skinning and rigging that is similar in quality to the original hand-rigged model. In addition, the use of a morphable model allows us to reshape and resize the 3D scan according to approximate human proportions. Thus, a human 3D scan can be modified to be taller, shorter, fatter or skinnier. Such manipulations of the 3D scan are useful both for social science research, as well as for visualization for applications such as fitness, body image, plastic surgery and the like.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Papaefthymiou, Margarita; Feng, Andrew; Shapiro, Ari; Papagiannakis, George
A fast and robust pipeline for populating mobile AR scenes with gamified virtual characters Proceedings Article
In: SIGGRAPH Asia 2015, pp. 1–8, ACM Press, Kobe, Japan, 2015, ISBN: 978-1-4503-3928-5.
@inproceedings{papaefthymiou_fast_2015,
title = {A fast and robust pipeline for populating mobile {AR} scenes with gamified virtual characters},
author = {Margarita Papaefthymiou and Andrew Feng and Ari Shapiro and George Papagiannakis},
url = {http://dl.acm.org/citation.cfm?doid=2818427.2818463},
doi = {10.1145/2818427.2818463},
isbn = {978-1-4503-3928-5},
year = {2015},
date = {2015-11-01},
booktitle = {{SIGGRAPH} Asia 2015},
pages = {1--8},
publisher = {ACM Press},
address = {Kobe, Japan},
abstract = {In this work we present a complete methodology for robust authoring of AR virtual characters powered from a versatile character animation framework (Smartbody), using only mobile devices. We can author, fully augment with life-size, animated, geometrically accurately registered virtual characters into any open space in less than 1 minute with only modern smartphones or tablets and then automatically revive this augmentation for subsequent activations from the same spot, in under a few seconds. Also, we handle efficiently scene authoring rotations of the AR objects using Geometric Algebra rotors in order to extract higher quality visual results. Moreover, we have implemented a mobile version of the global illumination for real-time Precomputed Radiance Transfer algorithm for diffuse shadowed characters in real-time, using High Dynamic Range (HDR) environment maps integrated in our opensource OpenGL Geometric Application (glGA) framework. Effective character interaction plays fundamental role in attaining high level of believability and makes the AR application more attractive and immersive based on the SmartBody framework.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Scherer, Stefan; Oiumette, Brett; Ryan, William S.; Lance, Brent J.; Gratch, Jonathan
Computational-based behavior analysis and peripheral psychophysiology Journal Article
In: Advances in Computational Psychophysiology, pp. 34–36, 2015.
@article{khooshabeh_computational-based_2015,
title = {Computational-based behavior analysis and peripheral psychophysiology},
author = {Peter Khooshabeh and Stefan Scherer and Brett Oiumette and William S. Ryan and Brent J. Lance and Jonathan Gratch},
url = {http://www.sciencemag.org/sites/default/files/custom-publishing/documents/CP_Supplement_Final_100215.pdf},
year = {2015},
date = {2015-10-01},
journal = {Advances in Computational Psychophysiology},
pages = {34--36},
abstract = {Computational-based behavior analysis aims to automatically identify, characterize, model, and synthesize multimodal nonverbal behavior within both human–machine as well as machine-mediated human–human interaction. It uses state-of-the-art machine learning algorithms to track human nonverbal and verbal information, such as facial expressions, gestures, and posture, as well as what and how a person speaks. The emerging technology from this field of research is relevant for a wide range of interactive and social applications, including health care and education. The characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or posttraumatic stress, could have significant benefits for treatments and the overall efficiency of the health care system.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chatterjee, Moitreya; Leuski, Anton
A Novel Statistical Approach for Image and Video Retrieval and Its Adaption for Active Learning Book Section
In: A Novel Statistical Approach for Image and Video Retrieval and Its Adaption for Active Learning, pp. 935–938, ACM, Brisbane, Australia, 2015, ISBN: 978-1-4503-3459-4.
@incollection{chatterjee_novel_2015,
title = {A Novel Statistical Approach for Image and Video Retrieval and Its Adaption for Active Learning},
author = {Moitreya Chatterjee and Anton Leuski},
url = {http://dl.acm.org/citation.cfm?id=2806368},
isbn = {978-1-4503-3459-4},
year = {2015},
date = {2015-10-01},
booktitle = {Proceedings of ACM Multimedia 2015},
pages = {935--938},
publisher = {ACM},
address = {Brisbane, Australia},
abstract = {The ever expanding multimedia content (such as images and videos), especially on the web, necessitates effective text query-based search (or retrieval) systems. Popular approaches for addressing this issue, use the query-likelihood model which fails to capture the user's information needs. In this work therefore, we explore a new ranking approach in the context of image and video retrieval from text queries. Our approach assumes two separate underlying distributions for query and the document respectively. We then, determine the extent of similarity between these two statistical distributions for the task of ranking. Furthermore we extend our approach, using Active Learning techniques, to address the question of obtaining a good performance without requiring a fully labeled training dataset. This is done by taking Sample Uncertainty, Density and Diversity into account. Our experiments on the popular TRECVID corpus and the open, relatively small-sized USC SmartBody corpus show that we are almost at-par or sometimes better than multiple state-of-the-art baselines.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Kang, Sin-Hwa; Feng, Andrew W.; Leuski, Anton; Casas, Dan; Shapiro, Ari
The Effect of An Animated Virtual Character on Mobile Chat Interactions Book Section
In: Proceedings of the 3rd International Conference on Human-Agent Interaction, pp. 105–112, ACM, Daegu, Korea, 2015, ISBN: 978-1-4503-3527-0.
@incollection{kang_effect_2015,
title = {The Effect of An Animated Virtual Character on Mobile Chat Interactions},
author = {Sin-Hwa Kang and Andrew W. Feng and Anton Leuski and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2814957},
isbn = {978-1-4503-3527-0},
year = {2015},
date = {2015-10-01},
booktitle = {Proceedings of the 3rd International Conference on Human-Agent Interaction},
pages = {105--112},
publisher = {ACM},
address = {Daegu, Korea},
abstract = {This study explores presentation techniques for a 3D animated chat-based virtual human that communicates engagingly with users. Interactions with the virtual human occur via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with no image or a static image of a virtual character as opposed to the animated visage of a virtual human capable of displaying appropriate nonverbal behavior. We further investigate users’ responses to the animated character’s gaze aversion which displayed the character’s act of looking away from users and was presented as a listening behavior. The findings of our study demonstrate that people tend to engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that averts its gaze, compared to an animated virtual human that does not avert its gaze, a static image of a virtual character, or an audio-only interface.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Brilman, Maarten; Scherer, Stefan
A Multimodal Predictive Model of Successful Debaters or How I Learned to Sway Votes Proceedings Article
In: Proceedings of ACM Multimedia 2015, pp. 149–158, ACM, Brisbane, Australia, 2015, ISBN: 978-1-4503-3459-4.
@inproceedings{brilman_multimodal_2015,
title = {A Multimodal Predictive Model of Successful Debaters or How I Learned to Sway Votes},
author = {Maarten Brilman and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?id=2806245},
doi = {10.1145/2733373.2806245},
isbn = {978-1-4503-3459-4},
year = {2015},
date = {2015-10-01},
booktitle = {Proceedings of ACM Multimedia 2015},
pages = {149--158},
publisher = {ACM},
address = {Brisbane, Australia},
abstract = {Interpersonal skills such as public speaking are essential assets for a large variety of professions and in everyday life. The ability to communicate in social environments often greatly influences a person's career development, can help resolve conflict, gain the upper hand in negotiations, or sway the public opinion. We focus our investigations on a special form of public speaking, namely public debates of socioeconomic issues that affect us all. In particular, we analyze performances of expert debaters recorded through the Intelligence Squared U.S. (IQ2US) organization. IQ2US collects high-quality audiovisual recordings of these debates and publishes them online free of charge. We extract audiovisual nonverbal behavior descriptors, including facial expressions, voice quality characteristics, and surface level linguistic characteristics. Within our experiments we investigate if it is possible to automatically predict if a debater or his/her team are going to sway the most votes after the debate using multimodal machine learning and fusion approaches. We identify unimodal nonverbal behaviors that characterize successful debaters and our investigations reveal that multimodal machine learning approaches can reliably predict which individual (∼75% accuracy) or team (85% accuracy) is going to win the most votes in the debate. We created a database consisting of over 30 debates with four speakers per debate suitable for public speaking skill analysis and plan to make this database publicly available for the research community.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Yuqiong; Lucas, Gale; Khooshabeh, Peter; Melo, Celso; Gratch, Jonathan
Effects of emotional expressions on persuasion Journal Article
In: Social Influence, vol. 10, no. 4, pp. 236–249, 2015, ISSN: 1553-4510, 1553-4529.
@article{wang_effects_2015,
title = {Effects of emotional expressions on persuasion},
author = {Yuqiong Wang and Gale Lucas and Peter Khooshabeh and Celso Melo and Jonathan Gratch},
url = {http://www.tandfonline.com/doi/full/10.1080/15534510.2015.1081856},
doi = {10.1080/15534510.2015.1081856},
issn = {1553-4510, 1553-4529},
year = {2015},
date = {2015-10-01},
journal = {Social Influence},
volume = {10},
number = {4},
pages = {236--249},
abstract = {This paper investigates how expressions of emotion affect persuasiveness when the expresser and the recipient have different levels of power. The first study demonstrates that when the recipient overpowers the expresser, emotional expressions reduce persuasion. A second study reveals that power and perceived appropriateness of emotional expressions independently moderate the effect of emotional expressions. Emotional expressions hamper persuasion when the recipient overpowers the expresser, or when the emotional expressions are considered inappropriate.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert "Skip"; Shilling, Russell; Forbell, Eric; Scherer, Stefan; Gratch, Jonathan; Morency, Louis-Philippe
Autonomous Virtual Human Agents for Healthcare Information Support and Clinical Interviewing Book Section
In: pp. 53–79, Elsevier, Inc., Philadelphia, PA, 2015, ISBN: 978-0-12-420248-1.
@incollection{rizzo_autonomous_2015,
title = {Autonomous Virtual Human Agents for Healthcare Information Support and Clinical Interviewing},
author = {Albert "Skip" Rizzo and Russell Shilling and Eric Forbell and Stefan Scherer and Jonathan Gratch and Louis-Philippe Morency},
url = {http://www.sciencedirect.com/science/article/pii/B9780124202481000039},
isbn = {978-0-12-420248-1},
year = {2015},
date = {2015-10-01},
pages = {53--79},
publisher = {Elsevier, Inc.},
address = {Philadelphia, PA},
abstract = {Over the last 20 years, a virtual revolution has taken place in the use of Virtual Reality simulation technology for clinical purposes. Recent shifts in the social and scientific landscape have now set the stage for the next major movement in Clinical Virtual Reality with the “birth” of intelligent virtual human (VH) agents. Seminal research and development has appeared in the creation of highly interactive, artificially intelligent and natural language capable VHs that can engage real human users in a credible fashion. VHs can now be designed to perceive and act in a virtual world, engage in face-to-face spoken dialogues, and in some cases they are capable of exhibiting human-like emotional reactions. This chapter will detail our applications in this area where a virtual human can provide private online healthcare information and support (i.e., SimCoach) and where a VH can serve the role as a clinical interviewer (i.e., SimSensei).},
keywords = {},
pubstate = {published},
tppubtype = {incollection},
internal-note = {NOTE(review): booktitle (the containing book's title) is missing and is required for @incollection — complete from the publisher's record at the url/isbn above}
}
Xu, Jie; Montague, Enid; Gratch, Jonathan; Hancock, Peter; Jeon, Myounghoon; Pfaff, Mark S.
Advances of Research in Affective Processes in Communication and Collaboration Journal Article
In: Proceedings of the Human Factors and Ergonomics Society Annual Meeting, vol. 59, no. 1, pp. 299–302, 2015, ISSN: 1541-9312.
@article{xu_advances_2015,
title = {Advances of Research in Affective Processes in Communication and Collaboration},
author = {Jie Xu and Enid Montague and Jonathan Gratch and Peter Hancock and Myounghoon Jeon and Mark S. Pfaff},
url = {http://pro.sagepub.com/lookup/doi/10.1177/1541931215591061},
doi = {10.1177/1541931215591061},
issn = {1541-9312},
year = {2015},
date = {2015-09-01},
journal = {Proceedings of the Human Factors and Ergonomics Society Annual Meeting},
volume = {59},
number = {1},
pages = {299--302},
abstract = {Affective processes have been an important research area for human factors and ergonomics. Although there is an obvious connection between affect and communication and collaboration, little research has been conducted in the human factors community until recently. In this panel, the panelists will discuss recent advances in affective research in communication and collaboration systems. Theoretical perspectives in human computer interaction, human agent interaction, and teamwork that take affective process into account will be discussed. Methodological issues will also be addressed, such as the measurements of affect, research design, and data analysis methods. Finally the applications of the theories and methods in different systems, such as human robot interaction, healthcare, and multi-tasking teams, will be discussed.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2006
Marsella, Stacy C.; Carnicke, Sharon Marie; Gratch, Jonathan; Okhmatovskaia, Anna; Rizzo, Albert
An Exploration of Delsarte's Structural Acting System Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA), pp. 80–92, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{marsella_exploration_2006,
title = {An Exploration of {Delsarte's} Structural Acting System},
author = {Stacy C. Marsella and Sharon Marie Carnicke and Jonathan Gratch and Anna Okhmatovskaia and Albert Rizzo},
url = {http://ict.usc.edu/pubs/An%20Exploration%20of%20Delsarte%E2%80%99s%20Structural%20Acting%20System.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA)},
pages = {80--92},
address = {Marina del Rey, CA},
abstract = {The designers of virtual agents often draw on a large research literature in psychology, linguistics and human ethology to design embodied agents that can interact with people. In this paper, we consider a structural acting system developed by Francois Delsarte as a possible resource in designing the nonverbal behavior of embodied agents. Using human subjects,we evaluate one component of the system, Delsarte's Cube, that addresses the meaning of differing attitudes of the hand in gestures.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Lamothe, Francois; Marsella, Stacy C.; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Virtual Rapport Proceedings Article
In: Lecture Notes in Computer Science, pp. 14–27, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_virtual_2006-1,
title = {Virtual Rapport},
author = {Jonathan Gratch and Anna Okhmatovskaia and Francois Lamothe and Stacy C. Marsella and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Virtual%20Rapport.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Lecture Notes in Computer Science},
volume = {4311},
pages = {14--27},
address = {Marina del Rey, CA},
abstract = {Effective face-to-face conversations are highly interactive. Participants respond to each other, engaging in nonconscious behavioral mimicry and backchanneling feedback. Such behaviors produce a subjective sense of rapport and are correlated with effective communication, greater liking and trust, and greater influence between participants. Creating rapport requires a tight sense-act loop that has been traditionally lacking in embodied conversational agents. Here we describe a system, based on psycholinguistic theory, designed to create a sense of rapport between a human speaker and virtual human listener. We provide empirical evidence that it increases speaker fluency and engagement.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Patel, Ronakkumar; Traum, David; Kennedy, Brandon
Building Effective Question Answering Characters Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_building_2006,
  author    = {Anton Leuski and Ronakkumar Patel and David Traum and Brandon Kennedy},
  title     = {Building Effective Question Answering Characters},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-07-01},
  url       = {http://ict.usc.edu/pubs/Building%20Effective%20Question%20Answering%20Characters.pdf},
  abstract  = {In this paper, we describe methods for building and evaluation of limited domain question-answering characters. Several classification techniques are tested, including text classification using support vector machines, language-model based retrieval, and cross-language information retrieval techniques, with the latter having the highest success rate. We also evaluated the effect of speech recognition errors on performance with users, finding that retrieval is robust until recognition reaches over 50% WER.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
An Information State-Based Dialogue Manager for Call for Fire Dialogues Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_information_2006,
  author    = {Antonio Roque and David Traum},
  title     = {An Information State-Based Dialogue Manager for Call for Fire Dialogues},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-07-01},
  url       = {http://ict.usc.edu/pubs/An%20Information%20State-Based%20Dialogue%20Manager%20for%20Call%20for%20Fire%20Dialogues.pdf},
  abstract  = {We present a dialogue manager for "Call for Fire" training dialogues. We describe the training environment, the domain, the features of its novel information state-based dialogue manager, the system it is a part of, and preliminary evaluation results.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Egges, Arjan; Eliëns, Anton; Isbister, Katherine; Paiva, Ana; Rist, Thomas; Hagen, Paul
Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans Proceedings Article
In: Dagstuhl Seminar Proceedings, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_design_2006,
  author    = {Jonathan Gratch and Stacy C. Marsella and Arjan Egges and Anton Eliëns and Katherine Isbister and Ana Paiva and Thomas Rist and Paul Hagen},
  title     = {Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans},
  booktitle = {Dagstuhl Seminar Proceedings},
  year      = {2006},
  date      = {2006-06-01},
  url       = {http://ict.usc.edu/pubs/Design%20criteria%20techniques%20and%20case%20studies%20for%20creating%20and%20evaluating%20interactive%20experiences%20for%20virtual%20humans.pdf},
  abstract  = {How does one go about designing a human? With the rise in recent years of virtual humans this is no longer purely a philosophical question. Virtual humans are intelligent agents with a body, often a human-like graphical body, that interact verbally and non-verbally with human users on a variety of tasks and applications. At a recent meeting on this subject, the above authors participated in a several day discussion on the question of virtual human design. Our working group approached this question from the perspective of interactivity. Specifically, how can one design effective interactive experiences involving a virtual human, and what constraints does this goal place on the form and function of an embodied conversational agent. Our group grappled with several related questions: What ideals should designers aspire to, what sources of theory and data will best lead to this goal and what methodologies can inform and validate the design process? This article summarizes our output and suggests a specific framework, borrowed from interactive media design, as a vehicle for advancing the state of interactive experiences with virtual humans.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents Book Section
In: Modeling Communication with Robots and Virtual Humans, pp. 296–309, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{traum_talking_2006,
title = {Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Talking%20to%20Virtual%20Humans.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Modeling Communication with Robots and Virtual Humans},
pages = {296--309},
abstract = {Virtual Humans are artificial characters who look and act like humans, but inhabit a simulated environment. One important aspect of many virtual humans is their communicative dialogue ability. In this paper we outline a methodology for study of dialogue behavior and construction of virtual humans. We also consider three architectures for different types of virtual humans that have been built at the Institute for Creative Technologies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection},
internal-note = {NOTE(review): publisher is missing and is required for @incollection — complete from the book's front matter}
}
Marsella, Stacy C.; Gratch, Jonathan
EMA: A computational model of appraisal dynamics Proceedings Article
In: Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion, Vienna, Austria, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{marsella_ema_2006,
title = {{EMA}: A computational model of appraisal dynamics},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/EMA-%20A%20computational%20model%20of%20appraisal%20dynamics.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion},
address = {Vienna, Austria},
abstract = {A computational model of emotion must explain both the rapid dynamics of some emotional reactions as well as the slower responses that follow deliberation. This is often addressed by positing multiple appraisal processes such as fast pattern directed vs. slower deliberative appraisals. In our view, this confuses appraisal with inference. Rather, we argue for a single and automatic appraisal process that operates over a person’s interpretation of their relationship to the environment. Dynamics arise from perceptual and inferential processes operating on this interpretation (including deliberative and reactive processes). We illustrate this perspective through the computational modeling of a naturalistic emotional situation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Graap, Ken; Manson, Brian; McNerney, Peter J.; Wiederhold, Brenda K.; Wiederhold, Mark; Spira, James
A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment Proceedings Article
In: NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for {Iraq War} Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment},
author = {Albert Rizzo and Jarrell Pair and Ken Graap and Brian Manson and Peter J. McNerney and Brenda K. Wiederhold and Mark Wiederhold and James Spira},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Military%20Personnel%20with%20Post%20Traumatic%20Stress%20Disorder-%20From%20Training%20to%20Toy%20to%20Treatment.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder},
abstract = {Post Traumatic Stress Disorder is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of a Virtual Iraq PTSD VR application that has been created from the virtual assets that were initially developed for a combat tactical training simulation, which then served as the inspiration for the X-Box game entitled Full Spectrum Warrior.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Allen, Brian; Dautricourt, Matthieu; Treskunov, Anton; Liewer, Matt; Graap, Ken; Reger, Greg; Rizzo, Albert
A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the IEEE VR 2006 Conference, pp. 64–71, Alexandria, VA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{pair_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for {Iraq War} Post Traumatic Stress Disorder},
author = {Jarrell Pair and Brian Allen and Matthieu Dautricourt and Anton Treskunov and Matt Liewer and Ken Graap and Greg Reger and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {Proceedings of the IEEE VR 2006 Conference},
pages = {64--71},
address = {Alexandria, VA},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-Centered tests with the application are currently underway at the Naval Medical Center–San Diego and within an Army Combat Stress Control Team in Iraq with clinical trials scheduled to commence in February 2006.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Traum, David
Improving Question-Answering With Linking Dialogues Proceedings Article
In: International Conference on Intelligent User Interfaces (IUI-2006), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_improving_2006,
  author    = {Sudeep Gandhe and Andrew S. Gordon and David Traum},
  title     = {Improving Question-Answering With Linking Dialogues},
  booktitle = {International Conference on Intelligent User Interfaces (IUI-2006)},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Improving%20Question-Answering%20With%20Linking%20Dialogues%20.pdf},
  abstract  = {Question-answering dialogue systems have found many applications in interactive learning environments. This paper is concerned with one such application for Army leadership training, where trainees input free-text questions that elicit pre-recorded video responses. Since these responses are already crafted before the question is asked, a certain degree of incoherence exists between the question that is asked and the answer that is given. This paper explores the use of short linking dialogues that stand in between the question and its video response to alleviate the problem of incoherence. We describe a set of experiments with human generated linking dialogues that demonstrate their added value. We then describe our implementation of an automated method for utilizing linking dialogues and show that these have better coherence properties than the original system without linking dialogues.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Mao, Wenji
Towards a Validated Model of "Emotional Intelligence" Proceedings Article
In: Proceedings of the 21st National Conference on Artificial Intelligence, pp. 1613–1616, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2006,
title = {Towards a Validated Model of "Emotional Intelligence"},
author = {Jonathan Gratch and Stacy C. Marsella and Wenji Mao},
url = {http://ict.usc.edu/pubs/Towards%20a%20Validated%20Model%20of%20Emotional%20Intelligence.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the 21st National Conference on Artificial Intelligence},
volume = {2},
pages = {1613--1616},
address = {Boston, MA},
abstract = {This article summarizes recent progress in developing a validated computational account of the cognitive antecedents and consequences of emotion. We describe the potential of this work to impact a variety of AI problem domains.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Dillenbourg, Pierre; Traum, David
Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving Journal Article
In: The Journal of the Learning Sciences, vol. 15, no. 1, pp. 121–151, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{dillenbourg_sharing_2006,
title = {Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving},
author = {Pierre Dillenbourg and David Traum},
url = {http://ict.usc.edu/pubs/Sharing%20Solutions-%20Persistence%20and%20Grounding%20in%20Multimodal%20Collaborative%20Problem%20Solving.pdf},
year = {2006},
date = {2006-01-01},
journal = {The Journal of the Learning Sciences},
volume = {15},
number = {1},
pages = {121--151},
abstract = {This article reports on an exploratory study of the relationship between grounding and problem solving in multimodal computer-mediated collaboration. This article examines two different media, a shared whiteboard and a MOO environment that includes a text chat facility. A study was done on how the acknowledgment rate (how often partners give feedback of having perceived, understood, and accepted partner's contributions) varies according to the media and the content of interactions. It was expected that the whiteboard would serve to draw schemata that disambiguate chat utterances. Instead, results show that the whiteboard is primarily used to represent the state of problem solving and the chat is used for grounding information created on the whiteboard. These results are interpreted in terms of persistence: More persistent information is exchanged through the more persistent medium. The whiteboard was used as a shared memory rather than a grounding tool.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul
A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond Journal Article
In: Tutorials in Quantitative Methods for Psychology, vol. 2, no. 2, pp. 43–51, 2006.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Virtual Humans
@article{rosenbloom_cognitive_2006,
title = {A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/A%20Cognitive%20Odyssey-%20From%20the%20Power%20Law%20of%20Practice%20to%20a%20General%20Learning%20Mechanism%20and%20Beyond.pdf},
year = {2006},
date = {2006-01-01},
journal = {Tutorials in Quantitative Methods for Psychology},
volume = {2},
number = {2},
pages = {43--51},
abstract = {This article traces a line of research that began with the establishment of a pervasive regularity in human performance – the Power Law of Practice – and proceeded through several decades' worth of investigations that this opened up into learning and cognitive architecture. The results touch on both cognitive psychology and artificial intelligence, and more specifically on the possibility of building general learning mechanisms/systems. It is a story whose final chapter is still to be written.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mao, Wenji; Gratch, Jonathan
Evaluating a Computational Model of Social Causality and Responsibility Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Hakodate, Japan, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_evaluating_2006,
  author    = {Wenji Mao and Jonathan Gratch},
  title     = {Evaluating a Computational Model of Social Causality and Responsibility},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Hakodate, Japan},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20Computational%20Model%20of%20Social%20Causality%20and%20Responsibility.pdf},
  abstract  = {Intelligent agents are typically situated in a social environment and must reason about social cause and effect. Such reasoning is qualitatively different from physical causal reasoning that underlies most intelligent systems. Modeling social causal reasoning can enrich the capabilities of multi-agent systems and intelligent user interfaces. In this paper, we empirically evaluate a computational model of social causality and responsibility against human social judgments. Results from our experimental studies show that in general, the model's predictions of internal variables and inference process are consistent with human responses, though they also suggest some possible refinement to the computational model.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; Pair, Jarrell; Reger, Greg; Treskunov, Anton; Parsons, Thomas D.
User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder Proceedings Article
In: Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology, Esbjerg, Denmark, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_user-centered_2006,
  title         = {User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder},
  author        = {Albert Rizzo and Ken Graap and Jarrell Pair and Reger and Anton Treskunov and Thomas D. Parsons},
  internal-note = {The author entry "Reger" is missing a given name in the source data; verify against the original paper before publishing.},
  url           = {http://ict.usc.edu/pubs/User-centered%20design%20driven%20development%20of%20a%20virtual%20reality%20therapy%20application%20for%20Iraq%20war%20combat-related%20post%20traumatic%20stress%20disorder.pdf},
  year          = {2006},
  date          = {2006-01-01},
  booktitle     = {Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology},
  address       = {Esbjerg, Denmark},
  abstract      = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords      = {MedVR, Virtual Humans},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Roque, Antonio; Ai, Hua; Traum, David
Evaluation of an Information State-Based Dialogue Manager Proceedings Article
In: Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue, Potsdam, Germany, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_evaluation_2006,
  title     = {Evaluation of an Information State-Based Dialogue Manager},
  author    = {Antonio Roque and Hua Ai and David Traum},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Information%20State-Based%20Dialogue%20Manager.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue},
  address   = {Potsdam, Germany},
  abstract  = {We describe an evaluation of an information state-based dialogue manager by measuring its accuracy in information state component updating.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Lindheim, Richard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation Book Section
In: Multimodal Intelligent Information Presentation, vol. 27, pp. 305–321, Springer, Netherlands, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{swartout_simulation_2006,
  title     = {Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation},
  author    = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Richard Lindheim and Stacy C. Marsella and Jeff Rickel and David Traum},
  url       = {http://ict.usc.edu/pubs/SIMULATION%20MEETS%20HOLLYWOOD-%20Integrating%20Graphics,%20Sound,%20Story%20and%20Character%20for%20Immersive%20Simulation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Multimodal Intelligent Information Presentation},
  volume    = {27},
  pages     = {305–321},
  publisher = {Springer},
  address   = {Netherlands},
  abstract  = {The Institute for Creative Technologies was created at the University of Southern California with the goal of bringing together researchers in simulation technology to collaborate with people from the entertainment industry. The idea was that much more compelling simulations could be developed if researchers who understood state-of-the-art simulation technology worked together with writers and directors who knew how to create compelling stories and characters. This paper presents our first major effort to realize that vision, the Mission Rehearsal Exercise Project, which confronts a soldier trainee with the kinds of dilemmas he might reasonably encounter in a peacekeeping operation. The trainee is immersed in a synthetic world and interacts with virtual humans: artificially intelligent and graphically embodied conversational agents that understand and generate natural language, reason about world events and respond appropriately to the trainee's actions or commands. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have also joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. In this paper we describe the Mission Rehearsal Exercise system and the insights gained through this large-scale integration.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Leuski, Anton; Pair, Jarrell; Traum, David; McNerney, Peter J.; Georgiou, Panayiotis G.; Patel, Ronakkumar
How to Talk to a Hologram Proceedings Article
In: Proceedings of the 11th International Conference on Intelligent User Interfaces, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_how_2006,
  title     = {How to Talk to a Hologram},
  author    = {Anton Leuski and Jarrell Pair and David Traum and Peter J. McNerney and Panayiotis G. Georgiou and Ronakkumar Patel},
  url       = {http://ict.usc.edu/pubs/How%20to%20Talk%20to%20a%20Hologram.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
  address   = {Sydney, Australia},
  abstract  = {There is a growing need for creating life-like virtual human simulations that can conduct a natural spoken dialog with a human student on a predefined subject. We present an overview of a spoken-dialog system that supports a person interacting with a full-size hologram-like virtual human character in an exhibition kiosk settings. We also give a brief summary of the natural language classification component of the system and describe the experiments we conducted with the system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Mao, Wenji; Marsella, Stacy C.
Modeling Social Emotions and Social Attributions Book Section
In: Sun, R. (Ed.): Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation, Cambridge University Press, 2006.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_modeling_2006,
  title     = {Modeling Social Emotions and Social Attributions},
  author    = {Jonathan Gratch and Wenji Mao and Stacy C. Marsella},
  editor    = {R. Sun},
  url       = {http://ict.usc.edu/pubs/Modeling%20Social%20Emotions%20and%20Social%20Attributions.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation},
  publisher = {Cambridge University Press},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Toward Virtual Humans Journal Article
In: AI Magazine, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{swartout_toward_2006,
  title     = {Toward Virtual Humans},
  author    = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Stacy C. Marsella and Jeff Rickel and David Traum},
  url       = {http://ict.usc.edu/pubs/Toward%20Virtual%20Humans.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {AI Magazine},
  abstract  = {This paper describes the virtual humans developed as part of the Mission Rehearsal Exercise project, a virtual reality-based training system. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. We describe the key capabilities of the virtual humans, including task representation and reasoning, natural language dialogue, and emotion reasoning, and show how these capabilities are integrated to provide more human-level intelligence than would otherwise be possible.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
2005
Traum, David; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan
Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_fight_2005,
  title     = {Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis},
  author    = {David Traum and William Swartout and Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Fight,%20Flight,%20or%20Negotiate-%20Believable%20Strategies%20for%20Conversing%20under%20Crisis.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {This paper describes a model of conversation strategies implemented in virtual humans designed to help people learn negotiation skills. We motivate and discuss these strategies and their use to allow a virtual human to engage in complex adversarial negotiation with a human trainee. Choice of strategy depends on both the personality of the agent and assessment of the likelihood that the negotiation can be beneficial. Execution of strategies can be performed by choosing specific dialogue behaviors such as whether and how to respond to a proposal. Current assessment of the value of the topic, the utility of the strategy, and affiliation toward the other conversants can be used to dynamically change strategies throughout the course of a conversation. Examples will be given from the SASO-ST project, in which a trainee learns to negotiate by interacting with virtual humans who employ these strategies.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kallman, Marcelo; Marsella, Stacy C.
Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans Proceedings Article
In: International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kallman_hierarchical_2005,
  title     = {Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans},
  author    = {Marcelo Kallman and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Hierarchical%20Motion%20Controllers%20for%20Real-Time%20Autonomous%20Virtual%20Humans.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {Continuous and synchronized whole-body motions are essential for achieving believable autonomous virtual humans in interactive applications. We present a new motion control architecture based on generic controllers that can be hierarchically interconnected and reused in real-time. The hierarchical organization implies that leaf controllers are motion generators while the other nodes are connectors, performing operations such as interpolation, blending, and precise scheduling of children controllers. We also describe how the system can correctly handle the synchronization of gestures with speech in order to achieve believable conversational characters. For that purpose, different types of controllers implement a generic model of the different phases of a gesture.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.; Kenny, Patrick G.; Hovy, Eduard; Narayanan, Shrikanth; Fast, Edward; Martinovski, Bilyana; Baghat, Rahul; Robinson, Susan; Marshall, Andrew; Wang, Dagen; Gandhe, Sudeep; Leuski, Anton
Dealing with Doctors: A Virtual Human for Non-team Interaction Proceedings Article
In: 6th SIGdial Conference on Discourse and Dialogue, Lisbon, Portugal, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_dealing_2005,
  title     = {Dealing with Doctors: A Virtual Human for Non-team Interaction},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella and Patrick G. Kenny and Eduard Hovy and Shrikanth Narayanan and Edward Fast and Bilyana Martinovski and Rahul Baghat and Susan Robinson and Andrew Marshall and Dagen Wang and Sudeep Gandhe and Anton Leuski},
  url       = {http://ict.usc.edu/pubs/Dealing%20with%20Doctors.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {6th SIGdial Conference on Discourse and Dialogue},
  address   = {Lisbon, Portugal},
  abstract  = {We present a virtual human doctor who can engage in multi-modal negotiation dialogue with people from other organizations. The doctor is part of the SASO-ST system, used for training for non-team interactions},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_virtual_2005,
  title     = {Virtual Humans for non-team interaction training},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
  address   = {Utrecht, Netherlands},
  abstract  = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_evaluating_2005,
  title     = {Evaluating a computational model of emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
  year      = {2005},
  date      = {2005-07-01},
  journal   = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
  volume    = {11},
  number    = {1},
  pages     = {23–43},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ettaile_transonics_2005,
  title     = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
  author    = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
  pages     = {89–92},
  address   = {Ann Arbor, MI},
  abstract  = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctor-patient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
A Computational Model of Dynamic Perceptual Attention for Virtual Humans Proceedings Article
In: Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation, Universal City, CA, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_computational_2005,
  title     = {A Computational Model of Dynamic Perceptual Attention for Virtual Humans},
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Dynamic%20Perceptual%20Attention%20for%20Virtual%20Humans.pdf},
  year      = {2005},
  date      = {2005-05-01},
  booktitle = {Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Universal City, CA},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to objects and locations in a virtual environment in a manner that looks believable and serves a functional purpose. We have developed a computational model of perceptual attention that mediates top-down and bottom-up attention processes of virtual humans in virtual environments. In this paper, we propose a perceptual attention model that will integrate perceptual attention toward objects and locations in the environment with the need to look at other parties in a social context.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nijholt, Anton; Traum, David
The Virtuality Continuum Revisited Proceedings Article
In: CHI 2005 Workshop on the Virtuality Continuum Revisited, Portland, OR, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nijholt_virtuality_2005,
  title     = {The Virtuality Continuum Revisited},
  author    = {Anton Nijholt and David Traum},
  url       = {http://ict.usc.edu/pubs/The%20Virtuality%20Continuum%20Revisited.pdf},
  year      = {2005},
  date      = {2005-04-01},
  booktitle = {CHI 2005 Workshop on the Virtuality Continuum Revisited},
  address   = {Portland, OR},
  abstract  = {We survey the themes and the aims of a workshop devoted to the state-of-the-art virtuality continuum. In this continuum, ranging from fully virtual to real physical environments, allowing for mixed, augmented and desktop virtual reality, several perspectives can be taken. Originally, the emphasis was on display technologies. Here we take the perspective of the inhabited environment, that is, environments positioned somewhere on this continuum that are inhabited by virtual (embodied) agents, that interact with each other and with their human partners. Hence, we look at it from the multi-party interaction perspective. In this workshop we will investigate the current state of the art, its shortcomings and a future research agenda.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Mao, Wenji; Gratch, Jonathan; Marsella, Stacy C.
Mitigation Theory: An Integrated Approach Proceedings Article
In: Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci), Stresa, Italy, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_mitigation_2005,
  title     = {Mitigation Theory: An Integrated Approach},
  author    = {Bilyana Martinovski and Wenji Mao and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Mitigation%20Theory-%20An%20Integrated%20Approach.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci)},
  address   = {Stresa, Italy},
  abstract  = {The purpose of this paper is to develop a theoretical model of mitigation by integrating cognitive and discourse approaches to appraisal and coping. Mitigation involves strategic, emotional, linguistic, and Theory of Mind processes on different levels of consciousness. We emphasize that discourse analysis can assist our understanding of these processes.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating Social Causality and Responsibility Models: An Initial Report Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_evaluating_2005,
  title       = {Evaluating Social Causality and Responsibility Models: An Initial Report},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT-TR-03-2005.pdf},
  year        = {2005},
  date        = {2005-01-01},
  number      = {ICT TR 03 2005},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich believability and cognitive capabilities of social intelligent agents. In this report, we present a general computational model of social causality and responsibility, and empirical results of a preliminary evaluation of the model in comparison with several other approaches.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Natural Behavior of a Listening Agent Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA), pp. 25–36, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{maatman_natural_2005,
  title     = {Natural Behavior of a Listening Agent},
  author    = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Natural%20Behavior%20of%20a%20Listening%20Agent.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA)},
  pages     = {25–36},
  address   = {Kos, Greece},
  abstract  = {In contrast to the variety of listening behaviors produced in human-to-human interaction, most virtual agents sit or stand passively when a user speaks. This is a reflection of the fact that although the correct responsive behavior of a listener during a conversation is often related to the semantics, the state of current speech understanding technology is such that semantic information is unavailable until after an utterance is complete. This paper will illustrate that appropriate listening behavior can also be generated by other features of a speaker's behavior that are available in real time such as speech quality, posture shifts and head movements. This paper presents a mapping from these real-time obtainable features of a human speaker to agent listening behaviors.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dialog_2005,
  title     = {Dialog Simulation for Background Characters},
  author    = {Dusan Jan and David Traum},
  url       = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters, however we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Responsive Behavior of a Listening Agent Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2005, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{maatman_responsive_2005,
  title       = {Responsive Behavior of a Listening Agent},
  author      = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
  url         = {http://ict.usc.edu/pubs/ICT-TR.02.2005.pdf},
  year        = {2005},
  date        = {2005-01-01},
  number      = {ICT TR 02 2005},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {The purpose of this assignment is twofold. First the possibility of generating real time responsive behavior is evaluated in order to create a more human-like agent. Second, the effect of the behavior of the agent on the human interactor is evaluated. The main motivation for the focus on responsive gestures is because much research has been done already on gestures that accompany the speaker, and nothing on gesture that accompany the listener, although responsiveness is a crucial part of a conversation. The responsive behavior of a virtual agent consists of performing gestures during the time a human is speaking to the agent. To generate the correct gestures, first a literature research is carried out, from which is concluded that with the current of the current Natural Language Understanding technology, it is not possible to extract semantic features of the human speech in real time. Thus, other features have to be considered. The result of the literature research is a basic mapping between real time obtainable features and their correct responsive behavior: - if the speech contains a relatively long period of low pitch then perform a head nod. - if the speech contains relatively high intensity then perform a head nod - if the speech contains disfluency then perform a posture shift, gazing behavior or a frown - if the human performs a posture shift then mirror this posture shift - if the human performs a head shake then mirror this head shake - if the human performs major gazing behavior then mimic this behavior A design has been made to implement this mapping into the behavior of a virtual agent and this design has been implemented which results in two programs. One to mirror the physical features of the human and one to extract the speech features from the voice of the human. The two programs are combined and the effect of the resulting behavior on the human interactor has been tested. 
The results of these tests are that the performing of responsive behavior has a positive effect on the natural behavior of a virtual agent and thus looks promising for future research. However, the gestures proposed by this mapping are not always context-independent. Thus, much refinement is still to be done and more functionality can be added to improve the responsive behavior. The conclusion of this research is twofold. First the performing of responsive behaviors in real time is possible with the presented mapping and this results in a more natural behaving agent. Second, some responsive behavior is still dependant of semantic information. This leaves open the further enhancement of the presented mapping in order to increase the responsive behavior.},
  keywords    = {Social Simulation, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Rizzo, Albert; Pair, Jarrell; McNerney, Peter J.; Eastlund, Ernie; Manson, Brian; Gratch, Jonathan; Hill, Randall W.; Swartout, William
Development of a VR Therapy Application for Iraq War Military Personnel with PTSD Book Section
In: Studies in Health Technology and Informatics, vol. 111, no. 13, pp. 407–413, 13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@incollection{rizzo_development_2005-1,
  title     = {Development of a VR Therapy Application for Iraq War Military Personnel with PTSD},
  author    = {Albert Rizzo and Jarrell Pair and Peter J. McNerney and Ernie Eastlund and Brian Manson and Jonathan Gratch and Randall W. Hill and William Swartout},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20VR%20Therapy%20Application%20for%20Iraq%20War%20Veterans%20with%20PTSD.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Studies in Health Technology and Informatics},
  volume    = {111},
  number    = {13},
  pages     = {407–413},
  address   = {13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA},
  series    = {Medicine Meets Virtual Reality},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 returning Iraq War military personnel are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure therapy has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of an Iraq War PTSD VR application that is being created from the virtual assets that were initially developed for the X-Box game entitled Full Spectrum Warrior which was inspired by a combat tactical training simulation, Full Spectrum Command.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gratch, Jonathan; Marsella, Stacy C.
Lessons from Emotion Psychology for the Design of Lifelike Characters Journal Article
In: Applied Artificial Intelligence Journal, vol. 19, pp. 215–233, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_lessons_2005,
title = {Lessons from Emotion Psychology for the Design of Lifelike Characters},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Lessons%20from%20Emotion%20Psychology%20for%20the%20Design%20of%20Lifelike%20Characters.pdf},
year = {2005},
date = {2005-01-01},
journal = {Applied Artificial Intelligence Journal},
volume = {19},
pages = {215–233},
abstract = {This special issue describes a number of applications that utilize lifelike characters that teach indirectly, by playing some role in a social interaction with a user. The design of such systems reflects a compromise between competing, sometimes unarticulated de- mands: they must realistically exhibit the behaviors and characteristics of their role, they must facilitate the desired learning, and they must work within the limitations of current technology, and there is little theoretical or empirical guidance on the impact of these compromises on learning. Our perspective on this problem is shaped by our interest in the role of emotion and emotional behaviors in such forms of learning. In recent years, there has been an explosion of interest in the role of emotion in the design of virtual hu- mans. The techniques and motivations underlying these various efforts can seem, from an outsider's perspective, as bewildering and multifaceted as the concept of emotion itself is generally accused of being. Drawing on insights from emotion psychology, this article attempts to clarify for the designers of educational agents the various theoretical perspec- tives on the concept of emotion with the aim of giving guidance to designers of educa- tional agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kock, Arien; Gratch, Jonathan
An Evaluation of Automatic Lip-syncing Methods for Game Environments Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{kock_evaluation_2005,
title = {An Evaluation of Automatic Lip-syncing Methods for Game Environments},
author = {Arien Kock and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT-TR.01.2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 01 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Lip-synching is the production of articulator motion corresponding to a given audible utterance. The Mission Rehearsal Exercise training system requires lip-synching to increase the believability of its virtual agents. In this report I document the selection, exploration, evaluation and comparison of several candidate lip-synching systems, ending with a recommendation. The evaluation focuses on the believability of articulators' expression, the foreseeable difficulty of integration into MRE’s architecture, the support for facial expressions related to semantics and prosodic features as well as the scalability of each system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
Controlling the Focus of Perceptual Attention in Embodied Conversational Agents Proceedings Article
In: Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems, 2005, ISBN: 1-59593-093-0.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_controlling_2005,
title = {Controlling the Focus of Perceptual Attention in Embodied Conversational Agents},
author = {Youngjun Kim and Randall W. Hill and David Traum},
url = {http://ict.usc.edu/pubs/Controlling%20the%20Focus%20of%20Perceptual%20Attention%20in%20Embodied%20Conversational%20Agents.pdf},
doi = {10.1145/1082473.1082641},
isbn = {1-59593-093-0},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems},
abstract = {In this paper, we present a computational model of dynamic perceptual attention for virtual humans. The computational models of perceptual attention that we surveyed fell into one of two camps: top-down and bottom-up. Biologically inspired computational models [2] typically focus on the bottom-up aspects of attention, while most virtual humans [1,3,7] implement a top-down form of attention. Bottom-up attention models only consider the sensory information without taking into consideration the saliency based on tasks or goals. As a result, the outcome of a purely bottom-up model will not consistently match the behavior of real humans in certain situations. Modeling perceptual attention as a purely top-down process, however, is also not sufficient for implementing a virtual human. A purely top-down model does not take into account the fact that virtual humans need to react to perceptual stimuli vying for attention. Top-down systems typically handle this in an ad hoc manner by encoding special rules to catch certain conditions in the environment. The problem with this approach is that it does not provide a principled way of integrating the ever-present bottom-up perceptual stimuli with top-down control of attention. This model extends the prior model [7] with perceptual resolution based on psychological theories of human perception [4]. This model allows virtual humans to dynamically interact with objects and other individuals, balancing the demands of goal-directed behavior with those of attending to novel stimuli. This model has been implemented and tested with the MRE Project [5].},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Morie, Jacquelyn; Williams, Josh; Pair, Jarrell; Buckwalter, John Galen
Human Emotional State and its Relevance for Military VR Training Proceedings Article
In: Proceedings of the 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans, Virtual Worlds
@inproceedings{rizzo_human_2005,
title = {Human Emotional State and its Relevance for Military VR Training},
author = {Albert Rizzo and Jacquelyn Morie and Josh Williams and Jarrell Pair and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Human%20Emotional%20State%20and%20its%20Relevance%20for%20Military%20VR%20Training.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction},
address = {Las Vegas, NV},
abstract = {Combat environments by their nature can produce a dramatic range of emotional responses in military personnel. When immersed in the emotional "fog of war," the potential exists for optimal human decision-making and performance of goal-directed activities to be seriously compromised. This may be especially true when combat training is conducted under conditions that lack emotional engagement by the soldier. Real world military training often naturally includes stress induction that aims to promote a similarity of internal emotional stimulus cues with what is expected to be present on the battlefield. This approach to facilitating optimal training effectiveness is supported by a long history of learning theory research. Current Virtual Reality military training approaches are noteworthy in their emphasis on creating hi-fidelity graphic and audio realism with the aim to foster better transfer of training. However, less emphasis is typically placed on the creation of emotionally evocative virtual training scenarios that can induce emotional stress in a manner similar to what is typically experienced under real world training conditions. As well, emotional issues in the post-combat aftermath need to be addressed, as can be seen in the devastating emotional difficulties that occur in some military personnel following combat. This is evidenced by the number of recent medical reports that suggest the incidence of "Vietnam-levels" of combat-related Post Traumatic Stress Disorder symptomatology in returning military personnel from the Iraq conflict. In view of these issues, the USC Institute for Creative Technologies (ICT) has initiated a research program to study emotional issues that are relevant to VR military applications. This paper will present the rationale and status of two ongoing VR research programs at the ICT that address sharply contrasting ends of the emotional spectrum relevant to the military: 1. 
The Sensory Environments Evaluation (SEE) Project is examining basic factors that underlie emotion as it occurs within VR training environments and how this could impact transfer of training, and 2. The Full Spectrum Warrior (FSW) Post Traumatic Stress Disorder Project which is currently in the process of converting the existing FSW combat tactical simulation training scenario (and X-Box game) into a VR treatment system for the conduct of graduated exposure therapy in Iraq war military personnel with Post Traumatic Stress Disorder.},
keywords = {MedVR, Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Social Causality and Responsibility: Modeling and Evaluation Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA), pp. 191–204, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2005,
title = {Social Causality and Responsibility: Modeling and Evaluation},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Social%20Causality%20and%20Responsibility-%20Modeling%20and%20Evaluation.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA)},
pages = {191–204},
address = {Kos, Greece},
abstract = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich the believability and the cognitive capabilities of social intelligent agents. In this paper, we present a general computational model of social causality and responsibility, and empirically evaluate and compare the model with several other approaches.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy and its linguistic manifestations Proceedings Article
In: Proceedings of Conference on Formal and Informal Negotiation (FINEXIN), Ottawa, Canada, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_rejection_2005,
title = {Rejection of empathy and its linguistic manifestations},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20and%20its%20linguistic%20manifestations.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of Conference on Formal and Informal Negotiation (FINEXIN)},
address = {Ottawa, Canada},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2004
Patel, Jigish; Parker, Robert; Traum, David
Simulation of Small Group Discussions for Middle Level of Detail Crowds Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_simulation_2004,
title = {Simulation of Small Group Discussions for Middle Level of Detail Crowds},
author = {Jigish Patel and Robert Parker and David Traum},
url = {http://ict.usc.edu/pubs/Simulation%20of%20Small%20Group%20Discussions%20for%20Middle%20Level%20of%20Detail%20Crowds.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {We present an algorithm for animating middle level of detail crowds engaged in conversation. Based on previous work from Padilha and Carletta, this algorithm is used to provide gestures for group characters in an embedded virtual world. The algorithm is implemented and used within the Mission Rehearsal Exercise project at ICT to control Bosnian crowd members.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Leuski, Anton; Traum, David
First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_first_2004,
title = {First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers},
author = {Sudeep Gandhe and Andrew S. Gordon and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20Toward%20Linking%20Dialogues-%20Mediating%20Between%20Free-text%20Questions%20and%20Pre-recorded%20Video%20Answers.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {Pre-recorded video segments can be very compelling for a variety of immersive training purposes, including providing answers to questions in after-action reviews. Answering questions fluently using pre-recorded video poses challenges, however. When humans interact, answers are constructed after questions are posed. When answers are pre-recorded, even if a correct answer exists in a library of video segments, the answer may be phrased in a way that is not coherent with the question. This paper reports on basic research experiments with short "linking dialogues" that mediate between the question and answer to reduce (or eliminate) the incoherence, resulting in more natural human-system interaction. A set of experiments were performed in which links were elicited to bridge between questions from users of an existing training application and selected answers from the system, and then comparisons made with unlinked answers. The results show that a linking dialogue can significantly increase the perceived relevance of the system's answers.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Towards a Validated Model of the Influence of Emotion on Human Performance Proceedings Article
In: Proceedings of the 24th Army Science Conference, 2004.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2004,
title = {Towards a Validated Model of the Influence of Emotion on Human Performance},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/TOWARDS%20A%20VALIDATED%20MODEL%20OF%20THE%20INFLUENCE%20OF%20EMOTION%20ON%20HUMAN%20PERFORMANCE.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating the modeling and use of emotion in virtual humans Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004,
title = {Evaluating the modeling and use of emotion in virtual humans},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20the%20modeling%20and%20use%20of%20emotion%20in%20virtual%20humans.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {New York, NY},
abstract = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we begin to evaluate them against the phenomena they purport to model. In this paper, we present one methodology to evaluate an emotion model. The methodology is based on comparing the behavior of the computational model against human behavior, using a standard clinical instrument for assessing human emotion and coping. We use this methodology to evaluate the EMA model of emotion. The model did quite well. And, as expected, the comparison helped identify where the model needs further development.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Muller, T. J.; Hartholt, Arno; Marsella, Stacy C.; Gratch, Jonathan; Traum, David
Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{muller_you_2004,
title = {Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue},
author = {T. J. Muller and Arno Hartholt and Stacy C. Marsella and Jonathan Gratch and David Traum},
url = {http://ict.usc.edu/pubs/Do%20you%20want%20to%20talk%20about%20it.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Kloster Irsee, Germany},
abstract = {In this paper, we describe an implemented system for emotion-referring dialogue. An agent can engage in emotion-referring dialogue if it first has a model of its own emotions, and secondly has a way of talking about them. We create this facility in MRE Project's virtual humans, building upon the existing emotion and dialogue facilities of these agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Small group discussion simulation for middle Level of Detail Crowds Proceedings Article
In: 8th Workshop on Semantics and Pragmatics of Dialogue, Barcelona, Spain, 2004.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_small_2004,
title = {Small group discussion simulation for middle Level of Detail Crowds},
author = {Jigish Patel and Robert Parker and David Traum},
url = {http://ict.usc.edu/pubs/Small%20group%20discussion%20simulation%20for%20middle%20Level%20of%20Detail%20Crowds.pdf},
year = {2004},
date = {2004-07-01},
booktitle = {8th Workshop on Semantics and Pragmatics of Dialogue},
address = {Barcelona, Spain},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
Expressive Behaviors for Virtual Worlds Book Section
In: Life-Like Characters: Tools, Affective Functions, and Applications, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{marsella_expressive_2004,
title = {Expressive Behaviors for Virtual Worlds},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/Expressive%20Behaviors%20for%20Virtual%20Worlds.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {Life-Like Characters: Tools, Affective Functions, and Applications},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a wide range of cognitive and motor capabilities, a model of task-oriented emotional appraisal and socially situated planning, and a model of how emotions and coping impact physical behavior. We describe the key research issues and approach in each of these prior systems, as well as our integration and its initial implementation in a leadership training system.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a General Model of Emotional Appraisal and Coping Proceedings Article
In: AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations, Palo Alto, CA, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004-1,
title = {Evaluating a General Model of Emotional Appraisal and Coping},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20a%20General%20Model%20of%20Emotional%20Appraisal%20and%20Coping.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations},
address = {Palo Alto, CA},
abstract = {Introduction: In our research, we have developed a general computational model of human emotion. The model attempts to account for both the factors that give rise to emotions as well as the wide-ranging impact emotions have on cognitive and behavioral responses. Emotions influence our beliefs, our decision-making and how we adapt our behavior to the world around us. While most apparent in moments of great stress, emotions sway even the mundane decisions we face in everyday life. Emotions also infuse our social relationships. Our interactions with each other are a source of many emotions and we have developed a range of behaviors that can communicate emotional information as well as an ability to recognize and be influenced by the emotional arousal of others. By virtue of their central role and wide influence, emotion arguably provides the means to coordinate the diverse mental and physical components required to respond to the world in a coherent fashion. (1st Paragraph)},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
A Utility-Based Approach to Intention Recognition Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_utility-based_2004,
title = {A Utility-Based Approach to Intention Recognition},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Utility-Based%20Approach%20to%20Intention%20Recognition.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {New York, NY},
abstract = {Based on the assumption that a rational agent will adopt a plan that maximizes the expected utility, we present a utility-based approach to plan recognition problem in this paper. The approach explicitly takes the observed agent's preferences into consideration, and computes the estimated expected utilities of plans to disambiguate competing hypotheses. Online plan recognition is realized by incrementally using plan knowledge and observations to change state probabilities. We also discuss the work and compare it with other probabilistic models in the paper.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Garg, Saurabh; Martinovski, Bilyana; Robinson, Susan; Stephan, Jens; Tetreault, Joel; Traum, David
Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{garg_evaluation_2004,
title = {Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus},
author = {Saurabh Garg and Bilyana Martinovski and Susan Robinson and Jens Stephan and Joel Tetreault and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Transcription%20and%20Annotation%20tools%20for%20a%20Multi-modal,%20Multi-party%20dialogue%20corpus.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper reviews nine available transcription and annotation tools, considering in particular the special difficulties arising from transcribing and annotating multi-party, multi-modal dialogue. Tools are evaluated as to the ability to support the user's annotation scheme, ability to visualize the form of the data, compatibility with other tools, flexibility of data representation, and general user-friendliness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}