Publications
Gratch, Jonathan; DeVault, David; Lucas, Gale M.; Marsella, Stacy
Negotiation as a Challenge Problem for Virtual Humans Inproceedings
In: Brinkman, Willem-Paul; Broekens, Joost; Heylen, Dirk (Ed.): Intelligent Virtual Agents, pp. 201–215, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{gratch_negotiation_2015,
title = {Negotiation as a Challenge Problem for Virtual Humans},
author = {Jonathan Gratch and David DeVault and Gale M. Lucas and Stacy Marsella},
editor = {Willem-Paul Brinkman and Joost Broekens and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Negotiation%20as%20a%20Challenge%20Problem%20for%20Virtual%20Humans.pdf},
doi = {10.1007/978-3-319-21996-7_21},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {201--215},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {We argue for the importance of negotiation as a challenge problem for virtual human research, and introduce a virtual conversational agent that allows people to practice a wide range of negotiation skills. We describe the multi-issue bargaining task, which has become a de facto standard for teaching and research on negotiation in both the social and computer sciences. This task is popular as it allows scientists or instructors to create a variety of distinct situations that arise in real-life negotiations, simply by manipulating a small number of mathematical parameters. We describe the development of a virtual human that will allow students to practice the interpersonal skills they need to recognize and navigate these situations. An evaluation of an early wizard-controlled version of the system demonstrates the promise of this technology for teaching negotiation and supporting scientific research on social intelligence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Susan; Morency, Louis-Philippe; Pynadath, David; Traum, David
Exploring the Implications of Virtual Human Research for Human-Robot Teams Inproceedings
In: Virtual, Augmented and Mixed Reality, pp. 186–196, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
@inproceedings{gratch_exploring_2015,
title = {Exploring the Implications of Virtual Human Research for Human-Robot Teams},
author = {Jonathan Gratch and Susan Hill and Louis-Philippe Morency and David Pynadath and David Traum},
url = {http://ict.usc.edu/pubs/Exploring%20the%20Implications%20of%20Virtual%20Human%20Research%20for%20Human-Robot%20Teams.pdf},
doi = {10.1007/978-3-319-21067-4_20},
isbn = {978-3-319-21066-7 978-3-319-21067-4},
year = {2015},
date = {2015-08-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9179},
pages = {186--196},
publisher = {Springer International Publishing},
address = {Los Angeles, CA},
abstract = {This article briefly explores potential synergies between the fields of virtual human and human-robot interaction research. We consider challenges in advancing the effectiveness of human-robot teams and make recommendations for enhancing this effectiveness by facilitating synergies between robotics and virtual human research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Stratou, Giota; Lucas, Gale M.; Gratch, Jonathan
Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma Inproceedings
In: Intelligent Virtual Agents, pp. 452–460, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{hoegen_comparing_2015,
title = {Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma},
author = {Rens Hoegen and Giota Stratou and Gale M. Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Comparing%20Behavior%20Towards%20Humans%20and%20Virtual%20Humans%20in%20a%20Social%20Dilemma.pdf},
doi = {10.1007/978-3-319-21996-7_48},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {452--460},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {Differences in social behavior shown towards virtual humans and real humans have been the subject of much research. Many of these studies compare virtual humans (VH) that are presented as either virtual agents controlled by a computer or as avatars controlled by real humans. In this study we directly compare VHs with real humans. Participants played an economic game against a computer-controlled VH or a visible human opponent. Decisions made throughout the game were logged; additionally, participants’ faces were filmed during the study and analyzed with expression recognition software. The analysis of choices showed participants are far more willing to violate social norms with VHs: they are more willing to steal and less willing to forgive. Facial expressions show trends that suggest they are treating VHs less socially. The results highlight that, even in impoverished social interactions, VHs have a long way to go before they can evoke truly human-like responses.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jon; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli
To tweet or not to tweet: The question of emotion and excitement about sporting events Inproceedings
In: Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion, Geneva, Switzerland, 2015.
@inproceedings{gratch_tweet_2015,
title = {To tweet or not to tweet: The question of emotion and excitement about sporting events},
author = {Jon Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler},
url = {http://ict.usc.edu/pubs/To%20tweet%20or%20not%20to%20tweet%20-The%20question%20of%20emotion%20and%20excitement%20about%20sporting%20events.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion},
address = {Geneva, Switzerland},
abstract = {Sporting events can serve as laboratories to explore emotion and computational tools provide new ways to examine emotional processes “in the wild”. Moreover, emotional processes are assumed -but untested- in sports economics. For example, according to the well-studied uncertainty of outcome hypothesis (UOH), “close” games are more exciting and therefore better attended. If one team were certain to win, it would take away a major source of excitement, reducing positive affect, and therefore decreasing attendance. The role of emotion here is assumed but has not been tested; furthermore, the measures used (ticket sales, attendance, TV-viewership) do not allow for such a test because they are devoid of emotional content. To address this problem, we use tweets per minute (specifically, tweets posted during 2014 World Cup with official game hashtags). Sentiment analysis of these tweets can give interesting insights into what emotional processes are involved. Another benefit of tweets is that they are dynamic, and novel results from dynamic analyses (of TV-viewership) suggest that the UOH effect can actually reverse as games unfold (people switch channels away from close games). We therefore also reconsider the UOH, specifically, extending it by both examining sentiment and dynamic changes during the game. To consider such changes, we focus on games that could have been close (high in uncertainty), but ended up being lower in uncertainty. We operationalize such unexpected certainty of outcome as the extent to which games are predicted to be “close” (based on betting odds), but ended up with a bigger difference between the teams’ scores than was expected. Statistical analyses revealed that, contrary to the UOH, games with a bigger difference in score between teams than expected had higher tweets per minute. We also performed sentiment analysis, categorizing each tweet as positive, negative or neutral, and found that games with higher tweets per minute also have a higher percentage of negative tweets. Furthermore, games that have a bigger difference than expected have a higher percentage of negative tweets (compared to games closer to what is expected). This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion (and not positive emotion). The results are discussed in terms of innovations in methodology and understanding the role of emotion for “tuning in” to real world events. Further research could explore the specific mechanisms that link negative sentiment to excitement, such as worry or out-group derogation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
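The operationalization described in the abstract above (an expected game margin implied by betting odds versus the realized margin, related to tweet volume and sentiment) can be written down compactly. The following sketch is purely illustrative, with hypothetical field names and made-up numbers; it is not the authors' analysis pipeline or data.

```python
from dataclasses import dataclass

@dataclass
class Game:
    expected_margin: float    # predicted score difference implied by betting odds
    actual_margin: float      # realized score difference
    tweets_per_minute: float  # tweet volume during the game
    negative_share: float     # fraction of tweets classified as negative

def unexpected_certainty(game: Game) -> float:
    """How much less 'close' the game turned out than the odds predicted."""
    return game.actual_margin - game.expected_margin

# Hypothetical games: one as close as expected, one unexpectedly lopsided.
games = [
    Game(expected_margin=0.5, actual_margin=1.0, tweets_per_minute=1200.0, negative_share=0.18),
    Game(expected_margin=0.5, actual_margin=4.0, tweets_per_minute=2100.0, negative_share=0.31),
]

for g in games:
    print(unexpected_certainty(g), g.tweets_per_minute, g.negative_share)
```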
Cummins, Nicholas; Scherer, Stefan; Krajewski, Jarek; Schnieder, Sebastian; Epps, Julien; Quatieri, Thomas F.
A Review of Depression and Suicide Risk Assessment Using Speech Analysis Journal Article
In: Speech Communication, vol. 71, pp. 10–49, 2015, ISSN: 0167-6393.
@article{cummins_review_2015,
title = {A Review of Depression and Suicide Risk Assessment Using Speech Analysis},
author = {Nicholas Cummins and Stefan Scherer and Jarek Krajewski and Sebastian Schnieder and Julien Epps and Thomas F. Quatieri},
url = {http://www.sciencedirect.com/science/article/pii/S0167639315000369},
doi = {10.1016/j.specom.2015.03.004},
issn = {0167-6393},
year = {2015},
date = {2015-07-01},
journal = {Speech Communication},
volume = {71},
pages = {10--49},
abstract = {This paper is the first review into the automatic analysis of speech for use as an objective predictor of depression and suicidality. Both conditions are major public health concerns; depression has long been recognised as a prominent cause of disability and burden worldwide, whilst suicide is a misunderstood and complex cause of death that strongly impacts the quality of life and mental health of the families and communities left behind. Despite this prevalence, the diagnosis of depression and assessment of suicide risk, due to their complex clinical characterisations, are difficult tasks, nominally achieved by the categorical assessment of a set of specific symptoms. However, many of the key symptoms of either condition, such as altered mood and motivation, are not physical in nature; therefore assigning a categorical score to them introduces a range of subjective biases to the diagnostic procedure. Due to these difficulties, research into finding a set of biological, physiological and behavioural markers to aid clinical assessment is gaining in popularity. This review starts by building the case for speech to be considered a key objective marker for both conditions; reviewing current diagnostic and assessment methods for depression and suicidality including key non-speech biological, physiological and behavioural markers and highlighting the expected cognitive and physiological changes associated with both conditions which affect speech production. We then review the key characteristics (size, associated clinical scores and collection paradigm) of active depressed and suicidal speech databases. The main focus of this paper is on how common paralinguistic speech characteristics are affected by depression and suicidality and the application of this information in classification and prediction systems. The paper concludes with an in-depth discussion on the key challenges – improving the generalisability through greater research collaboration and increased standardisation of data collection, and mitigating unwanted sources of variability – that will shape the future research directions of this rapidly growing field of speech processing research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kommers, Cody; Ustun, Volkan; Demski, Abram; Rosenbloom, Paul
Hierarchical Reasoning with Distributed Vector Representations Inproceedings
In: Proceedings of 37th Annual Conference of the Cognitive Science Society, Cognitive Science Society, Pasadena, CA, 2015.
@inproceedings{kommers_hierarchical_2015,
title = {Hierarchical Reasoning with Distributed Vector Representations},
author = {Cody Kommers and Volkan Ustun and Abram Demski and Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Hierarchical%20Reasoning%20with%20Distributed%20Vector%20Representations.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of 37th Annual Conference of the Cognitive Science Society},
publisher = {Cognitive Science Society},
address = {Pasadena, CA},
abstract = {We demonstrate that distributed vector representations are capable of hierarchical reasoning by summing sets of vectors representing hyponyms (subordinate concepts) to yield a vector that resembles the associated hypernym (superordinate concept). These distributed vector representations constitute a potentially neurally plausible model while demonstrating a high level of performance in many different cognitive tasks. Experiments were run using DVRS, a word embedding system designed for the Sigma cognitive architecture, and Word2Vec, a state-of-the-art word embedding system. These results contribute to a growing body of work demonstrating the various tasks on which distributed vector representations perform competently.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
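The core operation in the abstract above, summing the vectors of hyponyms and comparing the result to the hypernym's vector, takes only a few lines. The sketch below is a minimal illustration over hypothetical random embeddings; it does not use the DVRS or Word2Vec models evaluated in the paper (with real trained embeddings the first score would be expected to exceed the second).

```python
import numpy as np

def cosine(u, v):
    """Cosine similarity between two vectors."""
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

def hypernym_score(embeddings, hyponyms, hypernym):
    """Sum the hyponym vectors and measure how closely the sum
    resembles the candidate hypernym's vector (higher = more similar)."""
    summed = np.sum([embeddings[w] for w in hyponyms], axis=0)
    return cosine(summed, embeddings[hypernym])

# Hypothetical 300-dimensional embeddings keyed by word (random here,
# so the scores below carry no real semantics).
rng = np.random.default_rng(0)
embeddings = {w: rng.normal(size=300) for w in
              ["dog", "cat", "horse", "animal", "chair"]}

print(hypernym_score(embeddings, ["dog", "cat", "horse"], "animal"))
print(hypernym_score(embeddings, ["dog", "cat", "horse"], "chair"))
```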
Demski, Abram
Expression Graphs: Unifying Factor Graphs and Sum-Product Networks Inproceedings
In: Artificial General Intelligence, pp. 241–250, Springer, Berlin, Germany, 2015.
@inproceedings{demski_expression_2015,
title = {Expression Graphs: Unifying Factor Graphs and Sum-Product Networks},
author = {Abram Demski},
url = {http://ict.usc.edu/pubs/Expression%20Graphs%20Unifying%20Factor%20Graphs%20and%20Sum-Product%20Networks.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Artificial General Intelligence},
pages = {241--250},
publisher = {Springer},
address = {Berlin, Germany},
abstract = {Factor graphs are a very general knowledge representation, subsuming many existing formalisms in AI. Sum-product networks are a more recent representation, inspired by studying cases where factor graphs are tractable. Factor graphs emphasize expressive power, while sum-product networks restrict expressiveness to get strong guarantees on speed of inference. A sum-product network is not simply a restricted factor graph, however. Although the inference algorithms for the two structures are very similar, translating a sum-product network into factor graph representation can result in an exponential slowdown. We propose a formalism which generalizes factor graphs and sum-product networks, such that inference is fast in cases whose structure is close to a sum-product network.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nouri, Elnaz; Traum, David
Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game Inproceedings
In: Proceedings of AHFE 2015, Las Vegas, NV, 2015.
@inproceedings{nouri_cross_2015,
title = {Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Cross%20cultural%20report%20of%20values%20and%20decisions%20in%20the%20multi%20round%20ultimatum%20game%20and%20the%20centipede%20game.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AHFE 2015},
address = {Las Vegas, NV},
abstract = {This paper investigates the cultural differences in decision making behavior of people from the US and India. We study players from these cultures playing the Multi Round Ultimatum Game and the Centipede Game online. In order to study how people from different cultures evaluate decisions we use criteria from the Multi Attribute Relational Values (MARV) survey. Our results confirm the existence of cultural differences in how people from US and India make decisions in the Ultimatum and Centipede games. We also observe differences in responses to survey questions implying differences in the amount of importance that the two cultures assign to the MARV decision making criteria.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Gratch, Jonathan; Ustun, Volkan
Towards Emotion in Sigma: From Appraisal to Attention Inproceedings
In: Proceedings of AGI 2015, pp. 142–151, Springer International Publishing, Berlin, Germany, 2015.
@inproceedings{rosenbloom_towards_2015,
title = {Towards Emotion in Sigma: From Appraisal to Attention},
author = {Paul S. Rosenbloom and Jonathan Gratch and Volkan Ustun},
url = {http://ict.usc.edu/pubs/Towards%20Emotion%20in%20Sigma%20-%20From%20Appraisal%20to%20Attention.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AGI 2015},
volume = {9205},
pages = {142--151},
publisher = {Springer International Publishing},
address = {Berlin, Germany},
abstract = {A first step is taken towards incorporating emotional processing into Sigma, a cognitive architecture that is grounded in graphical models, with the addition of appraisal variables for expectedness and desirability plus their initial implications for attention at two levels of the control hierarchy. The results leverage many of Sigma's existing capabilities but with a few key additions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Lucas, Gale; Gratch, Jonathan; Rizzo, Albert; Morency, Louis-Philippe
Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews Journal Article
In: IEEE Transactions on Affective Computing, no. 99, 2015, ISSN: 1949-3045 (in press).
@article{scherer_self-reported_2015,
title = {Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews},
author = {Stefan Scherer and Gale Lucas and Jonathan Gratch and Albert Rizzo and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Self-reported%20symptoms%20of%20depression%20and%20PTSD%20are%20associated%20with%20reduced%20vowel%20space%20in%20screening%20interviews.pdf},
doi = {10.1109/TAFFC.2015.2440264},
issn = {1949-3045},
year = {2015},
date = {2015-06-01},
journal = {IEEE Transactions on Affective Computing},
number = {99},
abstract = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals with psychological and neurological disorders. Affective disorders such as depression and post-traumatic stress disorder (PTSD) are known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and reduced expressivity often either rely on subjective assessments or on analysis of speech under constrained laboratory conditions (e.g. sustained vowel production, reading tasks). These constraints render the analysis of such measures expensive and impractical. Within this work, we investigate an automatic unsupervised machine learning based approach to assess a speaker’s vowel space. Our experiments are based on recordings of 253 individuals. Symptoms of depression and PTSD are assessed using standard self-assessment questionnaires and their cut-off scores. The experiments show a significantly reduced vowel space in subjects that scored positively on the questionnaires. We show the measure’s statistical robustness against varying demographics of individuals and articulation rate. The reduced vowel space for subjects with symptoms of depression can be explained by the common condition of psychomotor retardation influencing articulation and motor control. These findings could potentially support treatment of affective disorders, like depression and PTSD in the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
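For readers unfamiliar with the term, "vowel space" is commonly quantified as the area spanned by a speaker's corner vowels in F1/F2 formant space; a smaller area corresponds to more centralized, less distinct vowels. The sketch below computes that classical triangle area for hypothetical formant values. It only illustrates the quantity under discussion; it is not the automatic unsupervised method used in the paper.

```python
def triangle_area(p1, p2, p3):
    """Area of a triangle given three (F1, F2) points in Hz (shoelace formula)."""
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    return abs(x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0

# Hypothetical mean formant values (F1, F2) in Hz for the corner vowels.
full_space    = {"a": (800, 1300), "i": (300, 2300), "u": (350, 800)}
reduced_space = {"a": (650, 1350), "i": (400, 2000), "u": (420, 950)}

for label, vowels in [("full", full_space), ("reduced", reduced_space)]:
    area = triangle_area(vowels["a"], vowels["i"], vowels["u"])
    print(f"{label} vowel space area: {area:.0f} Hz^2")
```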
Knowles, Megan; Lucas, Gale; Baumeister, Roy; Gardner, Wendi
Choking Under Social Pressure: Social Monitoring Among the Lonely Journal Article
In: Personality and Social Psychology Bulletin, vol. 41, no. 6, pp. 805–821, 2015, ISSN: 0146-1672, 1552-7433.
@article{knowles_choking_2015,
title = {Choking Under Social Pressure: Social Monitoring Among the Lonely},
author = {Megan Knowles and Gale Lucas and Roy Baumeister and Wendi Gardner},
url = {http://ict.usc.edu/pubs/Choking%20Under%20Social%20Pressure%20-%20Social%20Monitoring%20Among%20the%20Lonely.pdf},
doi = {10.1177/0146167215580775},
issn = {0146-1672, 1552-7433},
year = {2015},
date = {2015-06-01},
journal = {Personality and Social Psychology Bulletin},
volume = {41},
number = {6},
pages = {805--821},
abstract = {Lonely individuals may decode social cues well but have difficulty putting such skills to use precisely when they need them—in social situations. In four studies, we examined whether lonely people choke under social pressure by asking participants to complete social sensitivity tasks framed as diagnostic of social skills or nonsocial skills. Across studies, lonely participants performed worse than nonlonely participants on social sensitivity tasks framed as tests of social aptitude, but they performed just as well or better than the nonlonely when the same tasks were framed as tests of academic aptitude. Mediational analyses in Study 3 and misattribution effects in Study 4 indicate that anxiety plays an important role in this choking effect. This research suggests that lonely individuals may not need to acquire social skills to escape loneliness; instead, they must learn to cope with performance anxiety in interpersonal interactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chatterjee, Moitreya; Leuski, Anton
CRMActive: An Active Learning Based Approach for Effective Video Annotation and Retrieval Inproceedings
In: Proceedings of ACM International Conference on Multimedia Retrieval (ICMR), pp. 535–538, ACM, Shanghai, China, 2015.
@inproceedings{chatterjee_crmactive_2015,
title = {CRMActive: An Active Learning Based Approach for Effective Video Annotation and Retrieval},
author = {Moitreya Chatterjee and Anton Leuski},
url = {http://ict.usc.edu/pubs/CRMActive%20-%20An%20Active%20Learning%20Based%20Approach%20for%20Effective%20Video%20Annotation%20and%20Retrieval.pdf},
doi = {10.1145/2671188.2749342},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of ACM International Conference on Multimedia Retrieval (ICMR)},
pages = {535--538},
publisher = {ACM},
address = {Shanghai, China},
abstract = {Conventional multimedia annotation/retrieval systems such as Normalized Continuous Relevance Model (NormCRM) [7] require fully labeled training data for a good performance. Active Learning, by determining an order for labeling the training data, allows for a good performance even before the training data is fully annotated. In this work we propose an active learning algorithm, which combines a novel measure of sample uncertainty with a novel clustering-based approach for determining sample density and diversity and integrate it with NormCRM. The clusters are also iteratively refined to ensure both feature and label-level agreement among samples. We show that our approach outperforms multiple baselines both on a new, open dataset and on the popular TRECVID corpus at both the tasks of annotation and text-based retrieval of videos.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
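The selection criterion sketched in the abstract, sample uncertainty combined with clustering-based density and diversity, follows a common active-learning pattern. The snippet below shows that general pattern with generic scikit-learn components (a logistic-regression stand-in model, k-means clusters, least-confidence uncertainty); the specific measures in CRMActive and its integration with NormCRM are not reproduced here.

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression

def select_batch(model, X_unlabeled, n_clusters=5, batch_size=5):
    """Pick unlabeled samples that are both uncertain and representative:
    score = predictive uncertainty weighted by closeness to a cluster center,
    taking at most one sample per cluster for diversity."""
    proba = model.predict_proba(X_unlabeled)
    uncertainty = 1.0 - proba.max(axis=1)          # least-confident uncertainty

    km = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit(X_unlabeled)
    dist = np.linalg.norm(X_unlabeled - km.cluster_centers_[km.labels_], axis=1)
    density = 1.0 / (1.0 + dist)                   # closer to a center = denser region

    score = uncertainty * density
    chosen, seen_clusters = [], set()
    for idx in np.argsort(-score):                 # best-scoring first
        if km.labels_[idx] not in seen_clusters:   # one per cluster for diversity
            chosen.append(int(idx))
            seen_clusters.add(km.labels_[idx])
        if len(chosen) == batch_size:
            break
    return chosen

# Hypothetical data: a model trained on a small labeled seed set.
rng = np.random.default_rng(0)
X_seed, y_seed = rng.normal(size=(40, 8)), rng.integers(0, 2, size=40)
X_pool = rng.normal(size=(200, 8))
clf = LogisticRegression(max_iter=1000).fit(X_seed, y_seed)
print(select_batch(clf, X_pool))
```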
Artstein, Ron; Leuski, Anton; Maio, Heather; Mor-Barak, Tomer; Gordon, Carla; Traum, David
How Many Utterances Are Needed to Support Time-Offset Interaction? Inproceedings
In: Proceedings of FLAIRS 28, pp. 144–149, AAAI Press, Hollywood, FL, 2015, ISBN: 978-1-57735-730-8.
@inproceedings{artstein_how_2015,
title = {How Many Utterances Are Needed to Support Time-Offset Interaction?},
author = {Ron Artstein and Anton Leuski and Heather Maio and Tomer Mor-Barak and Carla Gordon and David Traum},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS15/paper/view/10442},
isbn = {978-1-57735-730-8},
year = {2015},
date = {2015-05-01},
booktitle = {Proceedings of FLAIRS 28},
pages = {144--149},
publisher = {AAAI Press},
address = {Hollywood, FL},
abstract = {A set of several hundred recorded statements by a single speaker is sufficient to address unrestricted questions and sustain short conversations on a circumscribed topic. Statements were recorded by Pinchas Gutter, a Holocaust survivor, talking about his personal experiences before, during and after the Holocaust. These statements were delivered to participants in conversation, using a “Wizard of Oz” system, where live operators select an appropriate reaction to each user utterance in real time. Even though participants were completely unconstrained in the questions they could ask, the recorded statements were able to directly address at least 58% of user questions. The unanswered questions were then analyzed to identify gaps, and additional statements were recorded to fill the gaps. The statements will be put in an automated system using existing language understanding technology, to create the first full working system of time-offset interaction, allowing a live conversation with a real human who is not present for the conversation in real time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Gratch, Jonathan
An Effective Conversation Tactic for Creating Value over Repeated Negotiations Inproceedings
In: Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems, pp. 1567–1576, International Foundation for Autonomous Agents and Multiagent Systems, Istanbul, Turkey, 2015.
@inproceedings{mell_effective_2015,
title = {An Effective Conversation Tactic for Creating Value over Repeated Negotiations},
author = {Johnathan Mell and Gale Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/An%20Effective%20Conversation%20Tactic%20for%20Creating%20Value%20over%20Repeated%20Negotiations.pdf},
year = {2015},
date = {2015-05-01},
booktitle = {Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems},
pages = {1567--1576},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Istanbul, Turkey},
abstract = {Automated negotiation research focuses on getting the most value from a single negotiation, yet real-world settings often involve repeated serial negotiations between the same parties. Repeated negotiations are interesting because they allow the discovery of mutually beneficial solutions that don’t exist within the confines of a single negotiation. This paper introduces the notion of Pareto efficiency over time to formalize this notion of value-creation through repeated interactions. We review literature from human negotiation research and identify a dialog strategy, favors and ledgers, that facilitates this process. As part of a longer-term effort to build intelligent virtual humans that can train human negotiators, we create a conversational agent that instantiates this strategy, and assess its effectiveness with human users, using the established Colored Trails negotiation testbed. In an empirical study involving a series of repeated negotiations, we show that humans are more likely to discover Pareto optimal solutions over time when matched with our favor-seeking agent. Further, an agent that asks for favors during early negotiations, regardless of whether these favors are ever repaid, leads participants to discover more joint value in later negotiations, even under the traditional definition of Pareto optimality within a single negotiation. Further, agents that match their words with deeds (repay their favors) create the most value for themselves. We discuss the implications of these findings for agents that engage in long-term interactions with human users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; McCubbins, Mathew D.; Turner, Mark
Against Game Theory Incollection
In: Emerging Trends in the Social and Behavioral Sciences: An Interdisciplinary, Searchable, and Linkable Resource, pp. 1–16, John Wiley & Sons, Inc., Hoboken, NJ, 2015, ISBN: 978-1-118-90077-2.
@incollection{lucas_against_2015,
title = {Against Game Theory},
author = {Gale M. Lucas and Mathew D. McCubbins and Mark Turner},
url = {http://ict.usc.edu/pubs/Against%20GameTheory.pdf},
isbn = {978-1-118-90077-2},
year = {2015},
date = {2015-05-01},
booktitle = {Emerging Trends in the Social and Behavioral Sciences: An Interdisciplinary, Searchable, and Linkable Resource},
pages = {1--16},
publisher = {John Wiley & Sons, Inc.},
address = {Hoboken, NJ},
abstract = {People make choices. Often, the outcome depends on choices other people make. What mental steps do people go through when making such choices? Game theory, the most influential model of choice in economics and the social sciences, offers an answer, one based on games of strategy like chess and checkers: the chooser considers the choices that others will make and makes a choice that will lead to a better outcome for the chooser, given all those choices by other people. It is universally established in the social sciences that classical game theory (even when heavily modified) is bad at predicting behavior. But instead of abandoning classical game theory, those in the social sciences have mounted a rescue operation under the name of “behavioral game theory.” Its main tool is to propose systematic deviations from the predictions of game theory, deviations that arise from character type, for example. Other deviations purportedly come from cognitive overload or limitations. The fundamental idea of behavioral game theory is that, if we know the deviations, then we can correct our predictions accordingly, and so get it right. There are two problems with this rescue operation, each of them fatal. (1) For a chooser, contemplating the range of possible deviations, as there are many dozens, actually makes it exponentially harder to figure out a path to an outcome. This makes the theoretical models useless for modeling human thought or human behavior in general. (2) Modeling deviations is helpful only if the deviations are consistent, so that scientists (and indeed decision-makers) can make predictions about future choices on the basis of past choices. But the deviations are not consistent. In general, deviations from classical models are not consistent for any individual from one task to the next or between individuals for the same task. In addition, people’s beliefs are in general not consistent with their choices. Accordingly, all hope is hollow that we can construct a general behavioral game theory. What can replace it? We survey some of the emerging candidates.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Shim, Han Suk; Park, Sunghyun; Chatterjee, Moitreya; Scherer, Stefan; Sagae, Kenji; Morency, Louis-Philippe
Acoustic and Para-Verbal Indicators of Persuasiveness in Social Multimedia Inproceedings
In: Proceedings of ICASSP 2015, pp. 2239–2243, IEEE, Brisbane, Australia, 2015.
@inproceedings{shim_acoustic_2015,
title = {Acoustic and Para-Verbal Indicators of Persuasiveness in Social Multimedia},
author = {Han Suk Shim and Sunghyun Park and Moitreya Chatterjee and Stefan Scherer and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/ACOUSTIC%20AND%20PARA-VERBAL%20INDICATORS%20OF%20PERSUASIVENESS%20IN%20SOCIAL%20MULTIMEDIA.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICASSP 2015},
pages = {2239--2243},
publisher = {IEEE},
address = {Brisbane, Australia},
abstract = {Persuasive communication and interaction play an important and pervasive role in many aspects of our lives. With the rapid growth of social multimedia websites such as YouTube, it has become more important and useful to understand persuasiveness in the context of online social multimedia content. In this paper, we present our results of conducting various analyses of persuasiveness in speech with our multimedia corpus of 1,000 movie review videos obtained from ExpoTV.com, a popular social multimedia website. Our experiments firstly show that a speaker’s level of persuasiveness can be predicted from acoustic characteristics and para-verbal cues related to speech fluency. Secondly, we show that taking acoustic cues in different time periods of a movie review can improve the performance of predicting a speaker’s level of persuasiveness. Lastly, we show that a speaker’s positive or negative attitude toward a topic influences the prediction performance as well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Morency, Louis-Philippe; Gratch, Jonathan; Pestian, John
Reduced Vowel Space Is a Robust Indicator of Psychological Distress: A Cross-Corpus Analysis Inproceedings
In: Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP), pp. 4789–4793, IEEE, Brisbane, Australia, 2015.
@inproceedings{scherer_reduced_2015,
title = {Reduced Vowel Space Is a Robust Indicator of Psychological Distress: A Cross-Corpus Analysis},
author = {Stefan Scherer and Louis-Philippe Morency and Jonathan Gratch and John Pestian},
url = {http://ict.usc.edu/pubs/REDUCED%20VOWEL%20SPACE%20IS%20A%20ROBUST%20INDICATOR%20OF%20PSYCHOLOGICAL%20DISTRESS-A%20CROSS-CORPUS%20ANALYSIS.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
pages = {4789--4793},
publisher = {IEEE},
address = {Brisbane, Australia},
abstract = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals with psychological and neurological disorders. Depression is known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and associated perceived hypoarticulation and reduced expressivity often rely on subjective assessments. Within this work, we investigate an automatic unsupervised machine learning approach to assess a speaker’s vowel space within three distinct speech corpora and compare observed vowel space measures of subjects with and without psychological conditions associated with psychological distress, namely depression, post-traumatic stress disorder (PTSD), and suicidality. Our experiments are based on recordings of over 300 individuals. The experiments show a significantly reduced vowel space in conversational speech for depression, PTSD, and suicidality. We further observe a similar trend of reduced vowel space for read speech. A possible explanation for a reduced vowel space is psychomotor retardation, a common symptom of depression that influences motor control and speech production.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul
Supraarchitectural Capability Integration: From Soar to Sigma Inproceedings
In: Proceedings of the 13th International Conference on Cognitive Modeling, Groningen, The Netherlands, 2015.
@inproceedings{rosenbloom_supraarchitectural_2015,
title = {Supraarchitectural Capability Integration: From Soar to Sigma},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Supraarchitectural%20Capability%20Integration%20-%20From%20Soar%20to%20Sigma.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of the 13th International Conference on Cognitive Modeling},
address = {Groningen, The Netherlands},
abstract = {Integration across capabilities, both architectural and supraarchitectural, is critical for cognitive architectures. Here we revisit a classic failure of supraarchitectural capability integration in Soar, involving data chunking, to understand better both its source and how it and related integration issues can be overcome via three general extensions in Sigma.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ward, Nigel G.; DeVault, David
Ten Challenges in Highly-Interactive Dialog Systems Inproceedings
In: Proceedings of AAAI 2015 Spring Symposium, Palo Alto, CA, 2015.
@inproceedings{ward_ten_2015,
title = {Ten Challenges in Highly-Interactive Dialog Systems},
author = {Nigel G. Ward and David DeVault},
url = {http://ict.usc.edu/pubs/Ten%20Challenges%20in%20Highly-Interactive%20Dialog%20Systems.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of AAAI 2015 Spring Symposium},
address = {Palo Alto, CA},
abstract = {Systems capable of highly-interactive dialog have recently been developed in several domains. This paper considers how to build on these successes to make systems more robust, easier to develop, more adaptable, and more scientifically significant.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Mell, Jonathan; Gratch, Jonathan
Toward Natural Turn-Taking in a Virtual Human Negotiation Agent Inproceedings
In: AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction, pp. 2–9, AAAI Press, Palo Alto, California, 2015.
@inproceedings{devault_toward_2015,
title = {Toward Natural Turn-Taking in a Virtual Human Negotiation Agent},
author = {David DeVault and Jonathan Mell and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Toward%20Natural%20Turn-Taking%20in%20a%20Virtual%20Human%20Negotiation%20Agent.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction},
pages = {2--9},
publisher = {AAAI Press},
address = {Palo Alto, California},
abstract = {In this paper we assess our progress toward creating a virtual human negotiation agent with fluid turn-taking skills. To facilitate the design of this agent, we have collected a corpus of human-human negotiation roleplays as well as a corpus of Wizard-controlled human-agent negotiations in the same roleplay scenario. We compare the natural turn-taking behavior in our human-human corpus with that achieved in our Wizard-of-Oz corpus, and quantify our virtual human’s turn-taking skills using a combination of subjective and objective metrics. We also discuss our design for a Wizard user interface to support real-time control of the virtual human’s turn-taking and dialogue behavior, and analyze our wizard’s usage of this interface.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2006
Roque, Antonio; Ai, Hua; Traum, David
Evaluation of an Information State-Based Dialogue Manager Inproceedings
In: Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue, Potsdam, Germany, 2006.
@inproceedings{roque_evaluation_2006,
title = {Evaluation of an Information State-Based Dialogue Manager},
author = {Antonio Roque and Hua Ai and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Information%20State-Based%20Dialogue%20Manager.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue},
address = {Potsdam, Germany},
abstract = {We describe an evaluation of an information state-based dialogue manager by measuring its accuracy in information state component updating.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Lindheim, Richard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation Incollection
In: Multimodal Intelligent Information Presentation, vol. 27, pp. 305–321, Springer, Netherlands, 2006.
@incollection{swartout_simulation_2006,
title = {Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation},
author = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Richard Lindheim and Stacy C. Marsella and Jeff Rickel and David Traum},
url = {http://ict.usc.edu/pubs/SIMULATION%20MEETS%20HOLLYWOOD-%20Integrating%20Graphics,%20Sound,%20Story%20and%20Character%20for%20Immersive%20Simulation.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Multimodal Intelligent Information Presentation},
volume = {27},
pages = {305--321},
publisher = {Springer},
address = {Netherlands},
abstract = {The Institute for Creative Technologies was created at the University of Southern California with the goal of bringing together researchers in simulation technology to collaborate with people from the entertainment industry. The idea was that much more compelling simulations could be developed if researchers who understood state-of-the-art simulation technology worked together with writers and directors who knew how to create compelling stories and characters. This paper presents our first major effort to realize that vision, the Mission Rehearsal Exercise Project, which confronts a soldier trainee with the kinds of dilemmas he might reasonably encounter in a peacekeeping operation. The trainee is immersed in a synthetic world and interacts with virtual humans: artificially intelligent and graphically embodied conversational agents that understand and generate natural language, reason about world events and respond appropriately to the trainee's actions or commands. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have also joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. In this paper we describe the Mission Rehearsal Exercise system and the insights gained through this large-scale integration.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Leuski, Anton; Pair, Jarrell; Traum, David; McNerney, Peter J.; Georgiou, Panayiotis G.; Patel, Ronakkumar
How to Talk to a Hologram Inproceedings
In: Proceedings of the 11th International Conference on Intelligent User Interfaces, Sydney, Australia, 2006.
@inproceedings{leuski_how_2006,
title = {How to Talk to a Hologram},
author = {Anton Leuski and Jarrell Pair and David Traum and Peter J. McNerney and Panayiotis G. Georgiou and Ronakkumar Patel},
url = {http://ict.usc.edu/pubs/How%20to%20Talk%20to%20a%20Hologram.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
address = {Sydney, Australia},
abstract = {There is a growing need for creating life-like virtual human simulations that can conduct a natural spoken dialog with a human student on a predefined subject. We present an overview of a spoken-dialog system that supports a person interacting with a full-size hologram-like virtual human character in an exhibition kiosk setting. We also give a brief summary of the natural language classification component of the system and describe the experiments we conducted with the system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Mao, Wenji; Marsella, Stacy C.
Modeling Social Emotions and Social Attributions Incollection
In: Sun, R. (Ed.): Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation, Cambridge University Press, 2006.
@incollection{gratch_modeling_2006,
title = {Modeling Social Emotions and Social Attributions},
author = {Jonathan Gratch and Wenji Mao and Stacy C. Marsella},
editor = {R. Sun},
url = {http://ict.usc.edu/pubs/Modeling%20Social%20Emotions%20and%20Social%20Attributions.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation},
publisher = {Cambridge University Press},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Toward Virtual Humans Journal Article
In: AI Magazine, 2006.
@article{swartout_toward_2006,
title = {Toward Virtual Humans},
author = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Stacy C. Marsella and Jeff Rickel and David Traum},
url = {http://ict.usc.edu/pubs/Toward%20Virtual%20Humans.pdf},
year = {2006},
date = {2006-01-01},
journal = {AI Magazine},
abstract = {This paper describes the virtual humans developed as part of the Mission Rehearsal Exercise project, a virtual reality-based training system. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. We describe the key capabilities of the virtual humans, including task representation and reasoning, natural language dialogue, and emotion reasoning, and show how these capabilities are integrated to provide more human-level intelligence than would otherwise be possible.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2005
Traum, David; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan
Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis Inproceedings
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
@inproceedings{traum_fight_2005,
title = {Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis},
author = {David Traum and William Swartout and Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Fight,%20Flight,%20or%20Negotiate-%20Believable%20Strategies%20for%20Conversing%20under%20Crisis.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {5th International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {This paper describes a model of conversation strategies implemented in virtual humans designed to help people learn negotiation skills. We motivate and discuss these strategies and their use to allow a virtual human to engage in complex adversarial negotiation with a human trainee. Choice of strategy depends on both the personality of the agent and assessment of the likelihood that the negotiation can be beneficial. Execution of strategies can be performed by choosing specific dialogue behaviors such as whether and how to respond to a proposal. Current assessment of the value of the topic, the utility of the strategy, and affiliation toward the other conversants can be used to dynamically change strategies throughout the course of a conversation. Examples will be given from the SASO-ST project, in which a trainee learns to negotiate by interacting with virtual humans who employ these strategies.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kallman, Marcelo; Marsella, Stacy C.
Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans Inproceedings
In: International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
@inproceedings{kallman_hierarchical_2005,
title = {Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans},
author = {Marcelo Kallman and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Hierarchical%20Motion%20Controllers%20for%20Real-Time%20Autonomous%20Virtual%20Humans.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {Continuous and synchronized whole-body motions are essential for achieving believable autonomous virtual humans in interactive applications. We present a new motion control architecture based on generic controllers that can be hierarchically interconnected and reused in real-time. The hierarchical organization implies that leaf controllers are motion generators while the other nodes are connectors, performing operations such as interpolation, blending, and precise scheduling of children controllers. We also describe how the system can correctly handle the synchronization of gestures with speech in order to achieve believable conversational characters. For that purpose, different types of controllers implement a generic model of the different phases of a gesture.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
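The hierarchy described above, with leaf motion generators and connector nodes that blend and schedule their children, maps naturally onto a small class structure. The sketch below is a schematic, hypothetical rendering of that organization in Python; it is not the controller system implemented in the paper.

```python
from abc import ABC, abstractmethod

class Controller(ABC):
    """A node in the controller hierarchy: evaluate a pose at time t."""
    @abstractmethod
    def evaluate(self, t: float) -> dict:
        ...

class MotionGenerator(Controller):
    """Leaf controller: produces joint values from a pose function."""
    def __init__(self, pose_fn):
        self.pose_fn = pose_fn

    def evaluate(self, t):
        return self.pose_fn(t)

class Blend(Controller):
    """Connector: linearly blends two children with weight w on the second."""
    def __init__(self, a: Controller, b: Controller, w: float):
        self.a, self.b, self.w = a, b, w

    def evaluate(self, t):
        pa, pb = self.a.evaluate(t), self.b.evaluate(t)
        return {j: (1 - self.w) * pa[j] + self.w * pb.get(j, pa[j]) for j in pa}

class Schedule(Controller):
    """Connector: plays children back to back, each for a fixed duration."""
    def __init__(self, children, durations):
        self.children, self.durations = children, durations

    def evaluate(self, t):
        for child, d in zip(self.children, self.durations):
            if t < d:
                return child.evaluate(t)
            t -= d
        return self.children[-1].evaluate(self.durations[-1])

# Hypothetical usage: blend an idle posture with a gesture, then schedule it.
idle = MotionGenerator(lambda t: {"elbow": 10.0, "wrist": 0.0})
wave = MotionGenerator(lambda t: {"elbow": 60.0, "wrist": 30.0 * t})
root = Schedule([idle, Blend(idle, wave, 0.5)], durations=[1.0, 2.0])
print(root.evaluate(1.5))
```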
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.; Kenny, Patrick G.; Hovy, Eduard; Narayanan, Shrikanth; Fast, Edward; Martinovski, Bilyana; Baghat, Rahul; Robinson, Susan; Marshall, Andrew; Wang, Dagen; Gandhe, Sudeep; Leuski, Anton
Dealing with Doctors: A Virtual Human for Non-team Interaction Inproceedings
In: 6th SIGdial Conference on Discourse and Dialogue, Lisbon, Portugal, 2005.
@inproceedings{traum_dealing_2005,
title = {Dealing with Doctors: A Virtual Human for Non-team Interaction},
author = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella and Patrick G. Kenny and Eduard Hovy and Shrikanth Narayanan and Edward Fast and Bilyana Martinovski and Rahul Baghat and Susan Robinson and Andrew Marshall and Dagen Wang and Sudeep Gandhe and Anton Leuski},
url = {http://ict.usc.edu/pubs/Dealing%20with%20Doctors.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {6th SIGdial Conference on Discourse and Dialogue},
address = {Lisbon, Portugal},
abstract = {We present a virtual human doctor who can engage in multi-modal negotiation dialogue with people from other organizations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
@inproceedings{traum_virtual_2005,
title = {Virtual Humans for non-team interaction training},
author = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
address = {Utrecht, Netherlands},
abstract = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
@article{gratch_evaluating_2005,
title = {Evaluating a computational model of emotion},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
year = {2005},
date = {2005-07-01},
journal = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
volume = {11},
number = {1},
pages = {23--43},
abstract = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion of Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Inproceedings
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
@inproceedings{ettaile_transonics_2005,
title = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
author = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2005},
date = {2005-06-01},
booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
pages = {89--92},
address = {Ann Arbor, MI},
abstract = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctorpatient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
A Computational Model of Dynamic Perceptual Attention for Virtual Humans Inproceedings
In: Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation, Universal City, CA, 2005.
@inproceedings{kim_computational_2005,
title = {A Computational Model of Dynamic Perceptual Attention for Virtual Humans},
author = {Youngjun Kim and Randall W. Hill and David Traum},
url = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Dynamic%20Perceptual%20Attention%20for%20Virtual%20Humans.pdf},
year = {2005},
date = {2005-05-01},
booktitle = {Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation},
address = {Universal City, CA},
abstract = {An important characteristic of a virtual human is the ability to direct its perceptual attention to objects and locations in a virtual environment in a manner that looks believable and serves a functional purpose. We have developed a computational model of perceptual attention that mediates top-down and bottom-up attention processes of virtual humans in virtual environments. In this paper, we propose a perceptual attention model that will integrate perceptual attention toward objects and locations in the environment with the need to look at other parties in a social context.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nijholt, Anton; Traum, David
The Virtuality Continuum Revisited Inproceedings
In: CHI 2005 Workshop on the Virtuality Continuum Revisited, Portland, OR, 2005.
@inproceedings{nijholt_virtuality_2005,
title = {The Virtuality Continuum Revisited},
author = {Anton Nijholt and David Traum},
url = {http://ict.usc.edu/pubs/The%20Virtuality%20Continuum%20Revisited.pdf},
year = {2005},
date = {2005-04-01},
booktitle = {CHI 2005 Workshop on the Virtuality Continuum Revisited},
address = {Portland, OR},
abstract = {We survey the themes and the aims of a workshop devoted to the state of the art of the virtuality continuum. In this continuum, ranging from fully virtual to real physical environments, allowing for mixed, augmented and desktop virtual reality, several perspectives can be taken. Originally, the emphasis was on display technologies. Here we take the perspective of the inhabited environment, that is, environments positioned somewhere on this continuum that are inhabited by virtual (embodied) agents that interact with each other and with their human partners. Hence, we look at it from the multi-party interaction perspective. In this workshop we will investigate the current state of the art, its shortcomings and a future research agenda.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Mao, Wenji; Gratch, Jonathan; Marsella, Stacy C.
Mitigation Theory: An Integrated Approach Inproceedings
In: Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci), Stresa, Italy, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_mitigation_2005,
title = {Mitigation Theory: An Integrated Approach},
author = {Bilyana Martinovski and Wenji Mao and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Mitigation%20Theory-%20An%20Integrated%20Approach.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci)},
address = {Stresa, Italy},
abstract = {The purpose of this paper is to develop a theoretical model of mitigation by integrating cognitive and discourse approaches to appraisal and coping. Mitigation involves strategic, emotional, linguistic, and Theory of Mind processes on different levels of consciousness. We emphasize that discourse analysis can assist our understanding of these processes.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating Social Causality and Responsibility Models: An Initial Report Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_evaluating_2005,
title = {Evaluating Social Causality and Responsibility Models: An Initial Report},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT-TR-03-2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 03 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich believability and cognitive capabilities of social intelligent agents. In this report, we present a general computational model of social causality and responsibility, and empirical results of a preliminary evaluation of the model in comparison with several other approaches.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Natural Behavior of a Listening Agent Inproceedings
In: Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA), pp. 25–36, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{maatman_natural_2005,
title = {Natural Behavior of a Listening Agent},
author = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Natural%20Behavior%20of%20a%20Listening%20Agent.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA)},
pages = {25--36},
address = {Kos, Greece},
abstract = {In contrast to the variety of listening behaviors produced in human-to-human interaction, most virtual agents sit or stand passively when a user speaks. This is a reflection of the fact that although the correct responsive behavior of a listener during a conversation is often related to the semantics, the state of current speech understanding technology is such that semantic information is unavailable until after an utterance is complete. This paper will illustrate that appropriate listening behavior can also be generated by other features of a speaker's behavior that are available in real time such as speech quality, posture shifts and head movements. This paper presents a mapping from these real-time obtainable features of a human speaker to agent listening behaviors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Inproceedings
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dialog_2005,
title = {Dialog Simulation for Background Characters},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {5th International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters; however, we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Responsive Behavior of a Listening Agent Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2005, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{maatman_responsive_2005,
title = {Responsive Behavior of a Listening Agent},
author = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/ICT-TR.02.2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 02 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {The purpose of this assignment is twofold. First, the possibility of generating real time responsive behavior is evaluated in order to create a more human-like agent. Second, the effect of the behavior of the agent on the human interactor is evaluated. The main motivation for the focus on responsive gestures is that much research has already been done on gestures that accompany the speaker, and nothing on gestures that accompany the listener, although responsiveness is a crucial part of a conversation. The responsive behavior of a virtual agent consists of performing gestures during the time a human is speaking to the agent. To generate the correct gestures, first a literature research is carried out, from which it is concluded that with the current state of Natural Language Understanding technology, it is not possible to extract semantic features of the human speech in real time. Thus, other features have to be considered. The result of the literature research is a basic mapping between real time obtainable features and their correct responsive behavior: - if the speech contains a relatively long period of low pitch then perform a head nod. - if the speech contains relatively high intensity then perform a head nod - if the speech contains disfluency then perform a posture shift, gazing behavior or a frown - if the human performs a posture shift then mirror this posture shift - if the human performs a head shake then mirror this head shake - if the human performs major gazing behavior then mimic this behavior A design has been made to implement this mapping into the behavior of a virtual agent and this design has been implemented, which results in two programs: one to mirror the physical features of the human and one to extract the speech features from the voice of the human. The two programs are combined and the effect of the resulting behavior on the human interactor has been tested. The results of these tests are that the performing of responsive behavior has a positive effect on the natural behavior of a virtual agent and thus looks promising for future research. However, the gestures proposed by this mapping are not always context-independent. Thus, much refinement is still to be done and more functionality can be added to improve the responsive behavior. The conclusion of this research is twofold. First, the performing of responsive behaviors in real time is possible with the presented mapping and this results in a more naturally behaving agent. Second, some responsive behavior is still dependent on semantic information. This leaves open the further enhancement of the presented mapping in order to increase the responsive behavior.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Rizzo, Albert; Pair, Jarrell; McNerney, Peter J.; Eastlund, Ernie; Manson, Brian; Gratch, Jonathan; Hill, Randall W.; Swartout, William
Development of a VR Therapy Application for Iraq War Military Personnel with PTSD Incollection
In: Studies in Health Technology and Informatics, vol. 111, no. 13, pp. 407–413, 13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@incollection{rizzo_development_2005-1,
title = {Development of a VR Therapy Application for Iraq War Military Personnel with PTSD},
author = {Albert Rizzo and Jarrell Pair and Peter J. McNerney and Ernie Eastlund and Brian Manson and Jonathan Gratch and Randall W. Hill and William Swartout},
url = {http://ict.usc.edu/pubs/Development%20of%20a%20VR%20Therapy%20Application%20for%20Iraq%20War%20Veterans%20with%20PTSD.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Studies in Health Technology and Informatics},
volume = {111},
number = {13},
pages = {407--413},
address = {13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA},
series = {Medicine Meets Virtual Reality},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 returning Iraq War military personnel are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure therapy has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of an Iraq War PTSD VR application that is being created from the virtual assets that were initially developed for the X-Box game entitled Full Spectrum Warrior, which was inspired by a combat tactical training simulation, Full Spectrum Command.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gratch, Jonathan; Marsella, Stacy C.
Lessons from Emotion Psychology for the Design of Lifelike Characters Journal Article
In: Applied Artificial Intelligence Journal, vol. 19, pp. 215–233, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_lessons_2005,
title = {Lessons from Emotion Psychology for the Design of Lifelike Characters},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Lessons%20from%20Emotion%20Psychology%20for%20the%20Design%20of%20Lifelike%20Characters.pdf},
year = {2005},
date = {2005-01-01},
journal = {Applied Artificial Intelligence Journal},
volume = {19},
pages = {215--233},
abstract = {This special issue describes a number of applications that utilize lifelike characters that teach indirectly, by playing some role in a social interaction with a user. The design of such systems reflects a compromise between competing, sometimes unarticulated demands: they must realistically exhibit the behaviors and characteristics of their role, they must facilitate the desired learning, and they must work within the limitations of current technology, and there is little theoretical or empirical guidance on the impact of these compromises on learning. Our perspective on this problem is shaped by our interest in the role of emotion and emotional behaviors in such forms of learning. In recent years, there has been an explosion of interest in the role of emotion in the design of virtual humans. The techniques and motivations underlying these various efforts can seem, from an outsider's perspective, as bewildering and multifaceted as the concept of emotion itself is generally accused of being. Drawing on insights from emotion psychology, this article attempts to clarify for the designers of educational agents the various theoretical perspectives on the concept of emotion with the aim of giving guidance to designers of educational agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kock, Arien; Gratch, Jonathan
An Evaluation of Automatic Lip-syncing Methods for Game Environments Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{kock_evaluation_2005,
title = {An Evaluation of Automatic Lip-syncing Methods for Game Environments},
author = {Arien Kock and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT-TR.01.2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 01 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Lip-synching is the production of articulator motion corresponding to a given audible utterance. The Mission Rehearsal Exercise training system requires lip-synching to increase the believability of its virtual agents. In this report I document the selection, exploration, evaluation and comparison of several candidate lip-synching systems, ending with a recommendation. The evaluation focuses on the believability of articulators' expression, the foreseeable difficulty of integration into MRE’s architecture, the support for facial expressions related to semantics and prosodic features as well as the scalability of each system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
Controlling the Focus of Perceptual Attention in Embodied Conversational Agents Inproceedings
In: Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems, 2005, ISBN: 1-59593-093-0.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_controlling_2005,
title = {Controlling the Focus of Perceptual Attention in Embodied Conversational Agents},
author = {Youngjun Kim and Randall W. Hill and David Traum},
url = {http://ict.usc.edu/pubs/Controlling%20the%20Focus%20of%20Perceptual%20Attention%20in%20Embodied%20Conversational%20Agents.pdf},
doi = {10.1145/1082473.1082641},
isbn = {1-59593-093-0},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems},
abstract = {In this paper, we present a computational model of dynamic perceptual attention for virtual humans. The computational models of perceptual attention that we surveyed fell into one of two camps: top-down and bottom-up. Biologically inspired computational models [2] typically focus on the bottom-up aspects of attention, while most virtual humans [1,3,7] implement a top-down form of attention. Bottom-up attention models only consider the sensory information without taking into consideration the saliency based on tasks or goals. As a result, the outcome of a purely bottom-up model will not consistently match the behavior of real humans in certain situations. Modeling perceptual attention as a purely top-down process, however, is also not sufficient for implementing a virtual human. A purely top-down model does not take into account the fact that virtual humans need to react to perceptual stimuli vying for attention. Top-down systems typically handle this in an ad hoc manner by encoding special rules to catch certain conditions in the environment. The problem with this approach is that it does not provide a principled way of integrating the ever-present bottom-up perceptual stimuli with top-down control of attention. This model extends the prior model [7] with perceptual resolution based on psychological theories of human perception [4]. This model allows virtual humans to dynamically interact with objects and other individuals, balancing the demands of goal-directed behavior with those of attending to novel stimuli. This model has been implemented and tested with the MRE Project [5].},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Morie, Jacquelyn; Williams, Josh; Pair, Jarrell; Buckwalter, John Galen
Human Emotional State and its Relevance for Military VR Training Inproceedings
In: Proceedings of the 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans, Virtual Worlds
@inproceedings{rizzo_human_2005,
title = {Human Emotional State and its Relevance for Military VR Training},
author = {Albert Rizzo and Jacquelyn Morie and Josh Williams and Jarrell Pair and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Human%20Emotional%20State%20and%20its%20Relevance%20for%20Military%20VR%20Training.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction},
address = {Las Vegas, NV},
abstract = {Combat environments by their nature can produce a dramatic range of emotional responses in military personnel. When immersed in the emotional "fog of war," the potential exists for optimal human decision-making and performance of goal-directed activities to be seriously compromised. This may be especially true when combat training is conducted under conditions that lack emotional engagement by the soldier. Real world military training often naturally includes stress induction that aims to promote a similarity of internal emotional stimulus cues with what is expected to be present on the battlefield. This approach to facilitating optimal training effectiveness is supported by a long history of learning theory research. Current Virtual Reality military training approaches are noteworthy in their emphasis on creating hi-fidelity graphic and audio realism with the aim to foster better transfer of training. However, less emphasis is typically placed on the creation of emotionally evocative virtual training scenarios that can induce emotional stress in a manner similar to what is typically experienced under real world training conditions. As well, emotional issues in the post-combat aftermath need to be addressed, as can be seen in the devastating emotional difficulties that occur in some military personnel following combat. This is evidenced by the number of recent medical reports that suggest the incidence of "Vietnam-levels" of combat-related Post Traumatic Stress Disorder symptomatology in returning military personnel from the Iraq conflict. In view of these issues, the USC Institute for Creative Technologies (ICT) has initiated a research program to study emotional issues that are relevant to VR military applications. This paper will present the rationale and status of two ongoing VR research programs at the ICT that address sharply contrasting ends of the emotional spectrum relevant to the military: 1. The Sensory Environments Evaluation (SEE) Project is examining basic factors that underlie emotion as it occurs within VR training environments and how this could impact transfer of training, and 2. The Full Spectrum Warrior (FSW) Post Traumatic Stress Disorder Project which is currently in the process of converting the existing FSW combat tactical simulation training scenario (and X-Box game) into a VR treatment system for the conduct of graduated exposure therapy in Iraq war military personnel with Post Traumatic Stress Disorder.},
keywords = {MedVR, Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Social Causality and Responsibility: Modeling and Evaluation Inproceedings
In: Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA), pp. 191–204, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2005,
title = {Social Causality and Responsibility: Modeling and Evaluation},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Social%20Causality%20and%20Responsibility-%20Modeling%20and%20Evaluation.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA)},
pages = {191--204},
address = {Kos, Greece},
abstract = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich the believability and the cognitive capabilities of social intelligent agents. In this paper, we present a general computational model of social causality and responsibility, and empirically evaluate and compare the model with several other approaches.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy and its linguistic manifestations Inproceedings
In: Proceedings of Conference on Formal and Informal Negotiation (FINEXIN), Ottawa, Canada, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_rejection_2005,
title = {Rejection of empathy and its linguistic manifestations},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20and%20its%20linguistic%20manifestations.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of Conference on Formal and Informal Negotiation (FINEXIN)},
address = {Ottawa, Canada},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected, which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires, whereas others have mainly strategic functions, gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation, not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2004
Patel, Jigish; Parker, Robert; Traum, David
Simulation of Small Group Discussions for Middle Level of Detail Crowds Inproceedings
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_simulation_2004,
title = {Simulation of Small Group Discussions for Middle Level of Detail Crowds},
author = {Jigish Patel and Robert Parker and David Traum},
url = {http://ict.usc.edu/pubs/Simulation%20of%20Small%20Group%20Discussions%20for%20Middle%20Level%20of%20Detail%20Crowds.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {We present an algorithm for animating middle level of detail crowds engaged in conversation. Based on previous work from Padilha and Carletta, this algorithm is used to provide gestures for group characters in an embedded virtual world. The algorithm is implemented and used within the Mission Rehearsal Exercise project at ICT to control Bosnian crowd members.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Leuski, Anton; Traum, David
First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers Inproceedings
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_first_2004,
title = {First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers},
author = {Sudeep Gandhe and Andrew S. Gordon and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20Toward%20Linking%20Dialogues-%20Mediating%20Between%20Free-text%20Questions%20and%20Pre-recorded%20Video%20Answers.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {Pre-recorded video segments can be very compelling for a variety of immersive training purposes, including providing answers to questions in after-action reviews. Answering questions fluently using pre-recorded video poses challenges, however. When humans interact, answers are constructed after questions are posed. When answers are pre-recorded, even if a correct answer exists in a library of video segments, the answer may be phrased in a way that is not coherent with the question. This paper reports on basic research experiments with short "linking dialogues" that mediate between the question and answer to reduce (or eliminate) the incoherence, resulting in more natural human-system interaction. A set of experiments were performed in which links were elicited to bridge between questions from users of an existing training application and selected answers from the system, and then comparisons made with unlinked answers. The results show that a linking dialogue can significantly increase the perceived relevance of the system's answers.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Towards a Validated Model of the Influence of Emotion on Human Performance Inproceedings
In: Proceedings of the 24th Army Science Conference, 2004.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2004,
title = {Towards a Validated Model of the Influence of Emotion on Human Performance},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/TOWARDS%20A%20VALIDATED%20MODEL%20OF%20THE%20INFLUENCE%20OF%20EMOTION%20ON%20HUMAN%20PERFORMANCE.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating the modeling and use of emotion in virtual humans Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004,
title = {Evaluating the modeling and use of emotion in virtual humans},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20the%20modeling%20and%20use%20of%20emotion%20in%20virtual%20humans.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {New York, NY},
abstract = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we begin to evaluate them against the phenomena they purport to model. In this paper, we present one methodology to evaluate an emotion model. The methodology is based on comparing the behavior of the computational model against human behavior, using a standard clinical instrument for assessing human emotion and coping. We use this methodology to evaluate the EMA model of emotion. The model did quite well. And, as expected, the comparison helped identify where the model needs further development.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Muller, T. J.; Hartholt, Arno; Marsella, Stacy C.; Gratch, Jonathan; Traum, David
Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{muller_you_2004,
title = {Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue},
author = {T. J. Muller and Arno Hartholt and Stacy C. Marsella and Jonathan Gratch and David Traum},
url = {http://ict.usc.edu/pubs/Do%20you%20want%20to%20talk%20about%20it.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Kloster Irsee, Germany},
abstract = {In this paper, we describe an implemented system for emotion-referring dialogue. An agent can engage in emotion-referring dialogue if it first has a model of its own emotions, and secondly has a way of talking about them. We create this facility in the MRE Project's virtual humans, building upon the existing emotion and dialogue facilities of these agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Small group discussion simulation for middle Level of Detail Crowds Inproceedings
In: 8th Workshop on Semantics and Pragmatics of Dialogue, Barcelona, Spain, 2004.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_small_2004,
title = {Small group discussion simulation for middle Level of Detail Crowds},
author = {Jigish Patel and Robert Parker and David Traum},
url = {http://ict.usc.edu/pubs/Small%20group%20discussion%20simulation%20for%20middle%20Level%20of%20Detail%20Crowds.pdf},
year = {2004},
date = {2004-07-01},
booktitle = {8th Workshop on Semantics and Pragmatics of Dialogue},
address = {Barcelona, Spain},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
Expressive Behaviors for Virtual Worlds Incollection
In: Life-Like Characters: Tools, Affective Functions, and Applications, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{marsella_expressive_2004,
title = {Expressive Behaviors for Virtual Worlds},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/Expressive%20Behaviors%20for%20Virtual%20Worlds.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {Life-Like Characters: Tools, Affective Functions, and Applications},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a wide range of cognitive and motor capabilities, a model of task-oriented emotional appraisal and socially situated planning, and a model of how emotions and coping impact physical behavior. We describe the key research issues and approach in each of these prior systems, as well as our integration and its initial implementation in a leadership training system.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a General Model of Emotional Appraisal and Coping Inproceedings
In: AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations, Palo Alto, CA, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004-1,
title = {Evaluating a General Model of Emotional Appraisal and Coping},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20a%20General%20Model%20of%20Emotional%20Appraisal%20and%20Coping.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations},
address = {Palo Alto, CA},
abstract = {Introduction: In our research, we have developed a general computational model of human emotion. The model attempts to account for both the factors that give rise to emotions as well as the wide-ranging impact emotions have on cognitive and behavioral responses. Emotions influence our beliefs, our decision-making and how we adapt our behavior to the world around us. While most apparent in moments of great stress, emotions sway even the mundane decisions we face in everyday life. Emotions also infuse our social relationships. Our interactions with each other are a source of many emotions and we have developed a range of behaviors that can communicate emotional information as well as an ability to recognize and be influenced by the emotional arousal of others. By virtue of their central role and wide influence, emotion arguably provides the means to coordinate the diverse mental and physical components required to respond to the world in a coherent fashion. (1st Paragraph)},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
A Utility-Based Approach to Intention Recognition Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_utility-based_2004,
title = {A Utility-Based Approach to Intention Recognition},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Utility-Based%20Approach%20to%20Intention%20Recognition.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {New York, NY},
abstract = {Based on the assumption that a rational agent will adopt a plan that maximizes the expected utility, we present a utility-based approach to the plan recognition problem in this paper. The approach explicitly takes the observed agent's preferences into consideration, and computes the estimated expected utilities of plans to disambiguate competing hypotheses. Online plan recognition is realized by incrementally using plan knowledge and observations to change state probabilities. We also discuss the work and compare it with other probabilistic models.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Garg, Saurabh; Martinovski, Bilyana; Robinson, Susan; Stephan, Jens; Tetreault, Joel; Traum, David
Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus Inproceedings
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{garg_evaluation_2004,
title = {Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus},
author = {Saurabh Garg and Bilyana Martinovski and Susan Robinson and Jens Stephan and Joel Tetreault and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Transcription%20and%20Annotation%20tools%20for%20a%20Multi-modal,%20Multi-party%20dialogue%20corpus.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper reviews nine available transcription and annotation tools, considering in particular the special difficulties arising from transcribing and annotating multi-party, multi-modal dialogue. Tools are evaluated as to the ability to support the user's annotation scheme, ability to visualize the form of the data, compatibility with other tools, flexibility of data representation, and general user-friendliness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Robinson, Susan; Stephan, Jens
Evaluation of multi-party virtual reality dialogue interaction Inproceedings
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_evaluation_2004,
title = {Evaluation of multi-party virtual reality dialogue interaction},
author = {David Traum and Susan Robinson and Jens Stephan},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20multi-party%20virtual%20reality%20dialogue%20interaction.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {We describe a dialogue evaluation plan for a multi-character virtual reality training simulation. A multi-component evaluation plan is presented, including user satisfaction, intended task completion, recognition rate, and a new annotation scheme for appropriateness. Preliminary results for formative tests are also presented.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Marsella, Stacy C.; Gratch, Jonathan
Emotion and Dialogue in the MRE Virtual Humans Inproceedings
In: Lecture Notes in Computer Science, pp. 117–127, Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_emotion_2004,
title = {Emotion and Dialogue in the MRE Virtual Humans},
author = {David Traum and Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emotion%20and%20Dialogue%20in%20the%20MRE%20Virtual%20Humans.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Lecture Notes in Computer Science},
volume = {3068},
pages = {117--127},
address = {Kloster Irsee, Germany},
abstract = {We describe the emotion and dialogue aspects of the virtual agents used in the MRE project at USC. The models of emotion and dialogue started independently, though each makes crucial use of a central task model. In this paper we describe the task model, dialogue model, and emotion model, and the interactions between them.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Hyeok-Soo; Gratch, Jonathan
A Planner-Independent Collaborative Planning Assistant Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 766–773, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_planner-independent_2004,
title = {A Planner-Independent Collaborative Planning Assistant},
author = {Hyeok-Soo Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Planner-Independent%20Collaborative%20Planning%20Assistant.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {766--773},
address = {New York, NY},
abstract = {This article introduces a novel approach to the problem of collaborative planning. We present a method that takes classical one-shot planning techniques - that take a fixed set of goals, initial state, and a domain theory - and adapts them to support the incremental, hierarchical and exploratory nature of collaborative planning that occurs between human planners, and that multi-agent planning systems attempt to support. This approach is planner-independent - in that it could be applied to any classical planning technique - and recasts the problem of collaborative planning as a search through a space of possible inputs to a classical planning system. This article outlines the technique and describes its application to the Mission Rehearsal Exercise, a multi-agent training system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Decision-Theoretic Approach to Plan Recognition Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2004, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_decision-theoretic_2004,
title = {Decision-Theoretic Approach to Plan Recognition},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Decision-Theoretic%20Approach%20to%20Plan%20Recognition.pdf},
year = {2004},
date = {2004-01-01},
number = {ICT TR 01.2004},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {In this report, first we give a survey of the work in the plan recognition field, including the evolution of different approaches and their strengths and weaknesses. Then we propose two decision-theoretic approaches to the plan recognition problem, which explicitly take outcome utilities into consideration. One is an extension within the probabilistic reasoning framework, by adding utility nodes to belief nets. The other is based on maximizing the estimated expected utility of possible plans. Illustrative examples are given to explain the approaches. Finally, we compare the two approaches presented in the report and summarize the work.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Gratch, Jonathan; Marsella, Stacy C.
Technical Details of a Domain-independent Framework for Modeling Emotion Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 04.2004, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{gratch_technical_2004,
title = {Technical Details of a Domain-independent Framework for Modeling Emotion},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Technical%20Details%20of%20a%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
year = {2004},
date = {2004-01-01},
number = {ICT TR 04.2004},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {This technical report elaborates on the technical details of the EMA model of emotional appraisal and coping. It should be seen as an appendix to the journal article on this topic (Gratch & Marsella, to appear)},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Traum, David
Issues in Multiparty Dialogues Journal Article
In: Advances in Agent Communication, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{traum_issues_2004,
title = {Issues in Multiparty Dialogues},
author = {David Traum},
editor = {F. Dignum},
url = {http://ict.usc.edu/pubs/Issues%20in%20Multiparty%20Dialogues.pdf},
year = {2004},
date = {2004-01-01},
journal = {Advances in Agent Communication},
abstract = {This article examines some of the issues in representation of, processing, and automated agent participation in natural language dialogue, considering expansion from two-party dialogue to multi-party dialogue. These issues include some regarding the roles agents play in dialogue, interactive factors, and content management factors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mao, Wenji; Gratch, Jonathan
Social Judgment in Multiagent Interactions Inproceedings
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 210–217, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2004,
title = {Social Judgment in Multiagent Interactions},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Social%20Judgment%20in%20Multiagent%20Interactions.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {210--217},
address = {New York, NY},
abstract = {Social judgment is a process of social explanation whereby one evaluates which entities deserve credit or blame for multi-agent activities. Such explanations are a key aspect of inference in a social environment and a model of this process can advance several design components of multi-agent systems. Social judgment underlies social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a computational approach to forming social judgment based on an agent's causal knowledge and communicative interactions with other agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
A Domain-independent Framework for Modeling Emotion Journal Article
In: Journal of Cognitive Systems Research, vol. 5, no. 4, pp. 269–306, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_domain-independent_2004,
title = {A Domain-independent Framework for Modeling Emotion},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/A%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
year = {2004},
date = {2004-01-01},
journal = {Journal of Cognitive Systems Research},
volume = {5},
number = {4},
pages = {269--306},
abstract = {In this article, we show how psychological theories of emotion shed light on the interaction between emotion and cognition, and thus can inform the design of human-like autonomous agents that must convey these core aspects of human behavior. We lay out a general computational framework of appraisal and coping as a central organizing principle for such systems. We then discuss a detailed domain-independent model based on this framework, illustrating how it has been applied to the problem of generating behavior for a significant social training application. The model is useful not only for deriving emotional state, but also for informing a number of the behaviors that must be modeled by virtual humans such as facial expressions, dialogue management, planning, reacting, and social understanding. Thus, the work is of potential interest to models of strategic decision-making, action selection, facial animation, and social intelligence.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Robinson, Susan; Martinovski, Bilyana; Garg, Saurabh; Stephan, Jens; Traum, David
Issues in corpus development for multi-party multi-modal task-oriented dialogue Inproceedings
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_issues_2004,
title = {Issues in corpus development for multi-party multi-modal task-oriented dialogue},
author = {Susan Robinson and Bilyana Martinovski and Saurabh Garg and Jens Stephan and David Traum},
url = {http://ict.usc.edu/pubs/Issues%20in%20corpus%20development%20for%20multi-party%20multi-modal%20task-oriented%20dialogue.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper describes the development of a multi-modal corpus based on multi-party multi-task driven common goal oriented spoken language interaction. The data consists of approximately 10 hours of audio human simulation radio data and nearly 5 hours of video and audio face-to-face sessions between human trainees and virtual agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Inproceedings
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narayanan_transonics_2003,
title = {Transonics: A Speech to Speech System for English-Persian Interactions},
author = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2003},
date = {2003-12-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
address = {U.S. Virgin Islands},
abstract = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Inproceedings
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_functions_2003,
title = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
author = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
url = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
year = {2003},
date = {2003-09-01},
booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
address = {Saarbruecken Germany},
abstract = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non-face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker (e.g., military radio communications). However, explicit guidelines, such as those provided by the military, are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Inproceedings
In: Kunstliche Intelligenzi (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hill_virtual_2003,
title = {Virtual Humans in the Mission Rehearsal Exercise System},
author = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
year = {2003},
date = {2003-06-01},
booktitle = {Kunstliche Intelligenzi (KI) (special issue on Embodied Conversational Agents)},
abstract = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Fight the Way You Train: The Role and Limits of Emotions in Training for Combat Journal Article
In: Brown Journal of World Affairs, vol. X, pp. 63–76, 2003.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_fight_2003,
title = {Fight the Way You Train: The Role and Limits of Emotions in Training for Combat},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Fight%20the%20Way%20You%20Train-The%20Role%20and%20Limits%20of%20Emotions%20in%20Training%20for%20Combat.pdf},
year = {2003},
date = {2003-06-01},
journal = {Brown Journal of World Affairs},
volume = {X},
pages = {63--76},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Inproceedings
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_nl_2003,
title = {NL Generation for Virtual Humans in a Complex Social Environment},
author = {David Traum and Michael Fleischman and Eduard Hovy},
url = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
year = {2003},
date = {2003-03-01},
booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
pages = {151--158},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Inproceedings
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
title = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
author = {Bilyana Martinovski and David Traum},
url = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
address = {Switzerland},
abstract = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather on cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also of dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}