Publications
Search
Chiu, Chung-Cheng; Marsella, Stacy C.
A style controller for generating virtual human behaviors Proceedings Article
In: The Tenth International Conference on Autonomous Agents and Multiagent Systems, Taipei, Taiwan, 2011.
@inproceedings{chiu_style_2011,
title = {A style controller for generating virtual human behaviors},
author = {Chung-Cheng Chiu and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/A%20style%20controller%20for%20generating%20virtual%20human%20behaviors.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {The Tenth International Conference on Autonomous Agents and Multiagent Systems},
address = {Taipei, Taiwan},
abstract = {Creating a virtual character that exhibits realistic physical behaviors requires a rich set of animations. To mimic the variety as well as the subtlety of human behavior, we may need to animate not only a wide range of behaviors but also variations of the same type of behavior influenced by the environment and the state of the character, including the emotional and physiological state. A general approach to this challenge is to gather a set of animations produced by artists or motion capture. However, this approach can be extremely costly in time and effort. In this work, we propose a model that can learn styled motion generation and an algorithm that produces new styles of motions via style interpolation. The model takes a set of styled motions as training samples and creates new motions that are the generalization among the given styles. Our style interpolation algorithm can blend together motions with distinct styles, and improves on the performance of previous work. We verify our algorithm using walking motions of different styles, and the experimental results show that our method is significantly better than previous work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Si, Mei; Marsella, Stacy C.
Modeling Theory of Mind and Cognitive Appraisal with Decision-Theoretic Agents Book Section
In: Appraisal, pp. 1–30, 2011.
@incollection{pynadath_modeling_2011,
title = {Modeling Theory of Mind and Cognitive Appraisal with Decision-Theoretic Agents},
author = {David V. Pynadath and Mei Si and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Theory%20of%20Mind%20and%20Cognitive%20Appraisal%20with%20Decision-Theoretic%20Agents.pdf},
year = {2011},
date = {2011-04-01},
booktitle = {Appraisal},
pages = {1--30},
abstract = {Agent-based simulation of human social behavior has become increasingly important as a basic research tool to further our understanding of social behavior, as well as to create virtual social worlds used to both entertain and educate. A key factor in human social interaction is our beliefs about others as intentional agents, a Theory of Mind. How we act depends not only on the immediate effect of our actions but also on how we believe others will react. In this paper, we discuss PsychSim, an implemented multiagent-based simulation tool for modeling social interaction and influence. While typical approaches to such modeling have used first-order logic, PsychSim agents have their own decision-theoretic models of the world, including beliefs about their environment and recursive models of other agents. Using these quantitative models of uncertainty and preferences, we have translated existing psychological theories into a decision-theoretic semantics that allow the agents to reason about degrees of believability in a novel way. We demonstrate the expressiveness of PsychSim’s decision-theoretic implementation of Theory of Mind by presenting its use as the foundation for a domain-independent model of appraisal theory, the leading psychological theory of emotion. The model of appraisal within PsychSim demonstrates the key role of a Theory of Mind capacity in appraisal and social emotions, as well as arguing for a uniform process for emotion and cognition.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Miller, Lynn C.; Marsella, Stacy C.; Dey, Teresa; Appleby, Paul Robert; Christensen, John L.; Klatt, Jennifer; Read, Stephen J.
Socially Optimized Learning in Virtual Environments (SOLVE) Proceedings Article
In: 4th International Conference on Interactive Digital Storytelling, Vancouver, British Columbia, 2011.
@inproceedings{miller_socially_2011,
title = {Socially Optimized Learning in Virtual Environments (SOLVE)},
author = {Lynn C. Miller and Stacy C. Marsella and Teresa Dey and Paul Robert Appleby and John L. Christensen and Jennifer Klatt and Stephen J. Read},
url = {http://ict.usc.edu/pubs/Socially%20Optimized%20Learning%20in%20Virtual%20Environments%20(SOL%20VE).pdf},
year = {2011},
date = {2011-01-01},
booktitle = {4th International Conference on Interactive Digital Storytelling},
address = {Vancouver, British Columbia},
abstract = {Although young men who have sex with men (MSM) are at high risk for contracting HIV, few interventions address the affective/automatic factors (e.g., sexual arousal, shame/stigma) that may precipitate young MSM’s risk-taking. A National Institutes of Health (NIH)-funded DVD interactive video intervention that simulated a "virtual date" with guides/mentors reduced sexual risk over 3-months for Black, Latino and Caucasian young MSM. In the current work, limitations of the DVD format (e.g., number of different risk challenges MSM encounter; DVD quickly becomes dated) were addressed with 3-D animated intelligent agents/interactive digital storytelling using a Unity Game platform. The development (e.g., design, art, social science formative research, etc.) of this NIH funded game for changing risky behavior is described as well as the ongoing national randomized "on-line" evaluation over 6 months.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Si, Mei; Marsella, Stacy C.; Pynadath, David V.
Importance of Well-Motivated Characters in Interactive Narratives: An Empirical Evaluation Proceedings Article
In: 2010 International Conference on Interactive Digital Storytelling, Edinburgh, UK, 2010.
@inproceedings{si_importance_2010,
title = {Importance of Well-Motivated Characters in Interactive Narratives: An Empirical Evaluation},
author = {Mei Si and Stacy C. Marsella and David V. Pynadath},
url = {http://ict.usc.edu/pubs/Importance%20of%20Well-Motivated%20Characters%20in%20Interactive%20Narratives.pdf},
year = {2010},
date = {2010-11-01},
booktitle = {2010 International Conference on Interactive Digital Storytelling},
address = {Edinburgh, UK},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lance, Brent; Marsella, Stacy C.
The Expressive Gaze Model: Using Gaze to Express Emotion Journal Article
In: Computer Graphics and Applications, IEEE, vol. 30, no. 4, pp. 62–73, 2010.
@article{lance_expressive_2010,
title = {The Expressive Gaze Model: Using Gaze to Express Emotion},
author = {Brent Lance and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/The_Expressive_Gaze_Model.pdf},
year = {2010},
date = {2010-08-01},
journal = {Computer Graphics and Applications, IEEE},
volume = {30},
number = {4},
pages = {62--73},
abstract = {The Expressive Gaze Model is a hierarchical framework for composing simple behaviors into emotionally expressive gaze shifts for virtual characters. Its primary components are the Gaze Warping Transformation, which generates emotionally expressive head and torso movement in a gaze shift, and an eye movement model.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ito, Jonathan Y.; Pynadath, David V.; Sonenberg, Liz; Marsella, Stacy C.
Wishful Thinking in Effective Decision Making (Extended Abstract) Proceedings Article
In: Proceedings of the 9th International Conference on Autonomous Agents and Multiagent Systems, Toronto, Ontario, 2010.
@inproceedings{ito_wishful_2010,
title = {Wishful Thinking in Effective Decision Making (Extended Abstract)},
author = {Jonathan Y. Ito and David V. Pynadath and Liz Sonenberg and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Wishful_thinking_extended_abstract.pdf},
year = {2010},
date = {2010-05-01},
booktitle = {Proceedings of the 9th International Conference on Autonomous Agents and Multiagent Systems},
volume = {1},
address = {Toronto, Ontario},
abstract = {Creating agents that act reasonably in uncertain environments is a primary goal of agent-based research. In this work we explore the theory that wishful thinking can be an effective strategy in uncertain and competitive decision scenarios. Specifically, we present the constraints necessary for wishful thinking to outperform Expected Utility Maximization and take instances of popular games from Game-Theoretic literature showing how they relate to our constraints and whether they can benefit from wishful-thinking.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ito, Jonathan Y.; Pynadath, David V.; Marsella, Stacy C.
Modeling self-deception within a decision-theoretic framework Journal Article
In: Autonomous Agent Multi-Agent Systems, vol. 20, pp. 3–13, 2010.
@article{ito_modeling_2010,
title = {Modeling self-deception within a decision-theoretic framework},
author = {Jonathan Y. Ito and David V. Pynadath and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20self-deception%20within%20a%20decision-theoretic%20framework.pdf},
doi = {10.1007/s10458-009-9096-7},
year = {2010},
date = {2010-05-01},
journal = {Autonomous Agent Multi-Agent Systems},
volume = {20},
pages = {3--13},
abstract = {Computational modeling of human belief maintenance and decision-making processes has become increasingly important for a wide range of applications. In this paper, we present a framework for modeling the human capacity for self-deception from a decision-theoretic perspective in which we describe an integrated process of wishful thinking which includes the determination of a desired belief state, the biasing of internal beliefs towards or away from this desired belief state, and the final decision-making process. Finally, we show that in certain situations self-deception can be beneficial.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Si, Mei; Marsella, Stacy C.; Pynadath, David V.
Evaluating Directorial Control in a Character-Centric Interactive Narrative Framework Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Toronto, Canada, 2010.
@inproceedings{si_evaluating_2010,
title = {Evaluating Directorial Control in a Character-Centric Interactive Narrative Framework},
author = {Mei Si and Stacy C. Marsella and David V. Pynadath},
url = {http://ict.usc.edu/pubs/Evaluating%20Directorial%20Control%20in%20a%20Character-Centric%20Interactive%20Narrative%20Framework.pdf},
year = {2010},
date = {2010-05-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Toronto, Canada},
abstract = {Interactive narrative allows the user to play a role in a story and interact with other characters controlled by the system. Directorial control is a procedure for dynamically tuning the interaction towards the author's desired effects. Most existing approaches for directorial control are built within plot-centric frameworks for interactive narrative and do not have a systematic way to ensure that the characters are always well-motivated during the interaction. Thespian is a character-centric framework for interactive narrative. In our previous work on Thespian, we presented an approach for applying directorial control while not affecting the consistency of characters' motivations. This work evaluates the effectiveness of our directorial control approach. Given the priority of generating only well-motivated characters' behaviors, we empirically evaluate how often the author's desired effects are achieved. We also discuss how the directorial control procedure can save the author effort in configuring the characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Si, Mei; Marsella, Stacy C.
Modeling Rich Characters in Interactive Narrative Games Proceedings Article
In: GAMEON-ASIA 2010, Shanghai, China, 2010.
@inproceedings{si_modeling_2010-1,
title = {Modeling Rich Characters in Interactive Narrative Games},
author = {Mei Si and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Rich%20Characters%20in%20Interactive%20Narrative%20Games.pdf},
year = {2010},
date = {2010-03-01},
booktitle = {GAMEON-ASIA 2010},
address = {Shanghai, China},
abstract = {Computing technologies have advanced rapidly over the past decade. Faster machines, better graphics, and more advanced algorithms become available every year. Moreover, the evolution of internet technology and the increasing accessibility of computing resources and mobile devices allow computing technologies to go beyond business and scientific computing, and become an important means for providing entertainment and facilitating communication. These advances have helped to enable a new form of media – interactive narrative games. Interactive narrative games allow a user to play a role in a story and interact with other characters driven by AI agents. The user's choices affect the unfolding of the story. Because of the support of user interactivity and the use of computer simulated virtual environments, interactive narrative games are closely related to video games. In fact, the rapid growth of interest in interactive narrative games is in part motivated by the explosion of computer-based games in recent years. Compared to more traditional forms of video games, such as arcade games, action games, and even role playing games, interactive narrative games emphasize more of the social and narrative aspects of the experi- ence. Story, of course, is a central part of the human experience both as entertainment and as a powerful tool for providing pedagogy. We watch movies, read novels and tell stories. Interactive narrative games provide an experience that integrates user agency with the engaging power of narrative. Interactive narrative games have been recognized as a promising tool for providing both pedagogy and entertainment. They have been proposed for a range of training applications, e.g. [13, 20, 26, 35, 22] as well as entertainment applications, e.g. [10, 23, 3, 12, 11, 36]. In this paper, we discuss the design desiderata for interactive narrative games, and in particular for creating the virtual characters in interactive narrative games. 
We argue that a rich model of characters that are well-motivated, socially aware and have a "Theory of Mind" is needed. We discuss the state of the art work on modeling virtual characters. In particular, we present the approaches taken in Thespian [27, 26, 28, 29, 31, 30] – a decision-theoretic multi-agent framework for interactive narratives.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Si, Mei; Marsella, Stacy C.; Pynadath, David V.
Modeling appraisal in theory of mind reasoning Proceedings Article
In: Journal of Autonomous Agents and Multi-Agent Systems; Proceedings of the 8th International Conference on Intelligent Virtual Agents, pp. 14–31, Tokyo, Japan, 2010.
@inproceedings{si_modeling_2010,
title = {Modeling appraisal in theory of mind reasoning},
author = {Mei Si and Stacy C. Marsella and David V. Pynadath},
url = {http://ict.usc.edu/pubs/Modeling%20appraisal%20in%20theory%20of%20mind%20reasoning.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {Journal of Autonomous Agents and Multi-Agent Systems; Proceedings of the 8th International Conference on Intelligent Virtual Agents},
volume = {20},
number = {1},
pages = {14--31},
address = {Tokyo, Japan},
abstract = {Cognitive appraisal theories, which link human emotional experience to their interpretations of events happening in the environment, are leading approaches to model emotions. Cognitive appraisal theories have often been used both for simulating "real emotions" in virtual characters and for predicting the human user's emotional experience to facilitate human-computer interaction. In this work, we investigate the computational modeling of appraisal in a multi-agent decision-theoretic framework using Partially Observable Markov Decision Process-based (POMDP) agents. Domain-independent approaches are developed for five key appraisal dimensions (motivational relevance, motivation congruence, accountability, control and novelty). We also discuss how the modeling of theory of mind (recursive beliefs about self and others) is realized in the agents and is critical for simulating social emotions. Our model of appraisal is applied to three different scenarios to illustrate its usages. This work not only provides a solution for computationally modeling emotion in POMDP-based agents, but also illustrates the tight relationship between emotion and cognition – the appraisal dimensions are derived from the processes and information required for the agent's decision-making and belief-maintenance processes, which suggests a uniform cognitive structure for emotion and cognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; Wang, Zhiyang; Marsella, Stacy C.
Evaluating Models of Speaker Head Nods for Virtual Agents Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Toronto, Canada, 2010.
@inproceedings{lee_evaluating_2010,
title = {Evaluating Models of Speaker Head Nods for Virtual Agents},
author = {Jina Lee and Zhiyang Wang and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20Models%20of%20Speaker%20Head%20Nods%20for%20Virtual%20Agents.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Toronto, Canada},
abstract = {Virtual human research has often modeled nonverbal behaviors based on the findings of psychological research. In recent years, however, there have been growing efforts to use automated, data-driven approaches to find patterns of nonverbal behaviors in video corpora and even thereby discover new factors that have not been previously documented. However, there have been few studies that compare how the behaviors generated by different approaches are interpreted by people. In this paper, we present an evaluation study to compare the perception of nonverbal behaviors, more specifically head nods, generated by different approaches. Studies have shown that head nods serve a variety of communicative functions and that the head is in constant motion during speaking turns. To evaluate the different approaches of head nod generation, we asked human subjects to evaluate videos of a virtual agent displaying nods generated by a human, by a machine learning data-driven approach, or by a handcrafted rule-based approach. Results show that there is a significant effect on the perception of head nods in terms of appropriate nod occurrence, especially between the data driven approach and the rule-based approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
von der Putten, Astrid M.; Kramer, Nicole C.; Gratch, Jonathan
How Our Personality Shapes Our Interactions with Virtual Characters - Implications for Research and Development Proceedings Article
In: 10th International Conference on Intelligent Virtual Agents, Philadelphia, PA, 2010.
@inproceedings{von_der_putten_how_2010,
title = {How Our Personality Shapes Our Interactions with Virtual Characters - Implications for Research and Development},
author = {Astrid M. von der Putten and Nicole C. Kramer and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/How%20Our%20Personality%20Shapes%20Our%20Interactions%20with%20Virtual%20Characters%20-%20Implications%20for%20Research%20and%20Development.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {10th International Conference on Intelligent Virtual Agents},
address = {Philadelphia, PA},
abstract = {There is a general lack of awareness for the influence of users' personality traits on human-agent-interaction (HAI). Numerous studies do not even consider explanatory variables like age and gender although they are easily accessible. The present study focuses on explaining the occurrence of social effects in HAI. Apart from the original manipulation of the study we assessed the users' traits. Results show that participants' personality traits – those traits which relate to persistent behavioral patterns in social contact (agreeableness, extraversion, approach avoidance, self-efficacy in monitoring others, shyness, public self-consciousness) – were found to be predictive, whereas other personality traits and gender and age did not affect the evaluation. Results suggest that personality traits are better predictors for the evaluation outcome than the actual behavior of the agent as it has been manipulated in the experiment. Implications for the research on and development of virtual agents are discussed.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lance, Brent; Marsella, Stacy C.
Glances, Glares, and Glowering: How Should a Virtual Human Express Emotion Through Gaze? Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems, vol. 20, no. 1, 2010.
@article{lance_glances_2010,
title = {Glances, Glares, and Glowering: How Should a Virtual Human Express Emotion Through Gaze?},
author = {Brent Lance and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Glances,%20Glares,%20and%20Glowering-%20How%20Should%20a%20Virtual%20Human%20Express%20Emotion%20Through%20Gaze.pdf},
year = {2010},
date = {2010-01-01},
journal = {Journal Autonomous Agents and Multi-Agent Systems},
volume = {20},
number = {1},
abstract = {Gaze is an extremely powerful expressive signal that is used for many purposes, from expressing emotion to regulating human interaction. The use of gaze as a signal has been exploited to strong effect in hand-animated characters, greatly enhancing the believability of the character's simulated life. However, virtual humans animated in real-time have been less successful at using expressive gaze. One reason for this is that a gaze shift towards any specific target can be performed in many different ways, using many different expressive manners of gaze, each of which can potentially imply a different emotional or cognitive internal state. However, there is currently no mapping that describes how a user will attribute these internal states to a virtual character performing a gaze shift in a particular manner. In this paper, we begin to address this by providing the results of an empirical study that explores the mapping between an observer's attribution of emotional state to gaze. The purpose of this mapping is to allow for an interactive virtual human to generate believable gaze shifts that a user will attribute a desired emotional state to. We have generated a set of animations by composing low-level gaze attributes culled from the nonverbal behavior literature. Then, subjects judged the animations displaying these attributes. While the results do not provide a complete mapping between gaze and emotion, they do provide a basis for a generative model of expressive gaze.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Marsella, Stacy C.; Gratch, Jonathan; Petta, Paola
Computational Models of Emotion Book Section
In: Scherer, K. R.; Bänziger, T.; Roesch, E. B. (Ed.): A blueprint for an affectively competent agent: Cross-fertilization between Emotion Psychology, Affective Neuroscience, and Affective Computing, Oxford University Press, Oxford, 2010.
@incollection{marsella_computational_2010,
title = {Computational Models of Emotion},
author = {Stacy C. Marsella and Jonathan Gratch and Paola Petta},
editor = {K. R. Scherer and T. Bänziger and E. B. Roesch},
url = {http://ict.usc.edu/pubs/Computational%20Models%20of%20Emotion.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {A blueprint for an affectively competent agent: Cross-fertilization between Emotion Psychology, Affective Neuroscience, and Affective Computing},
publisher = {Oxford University Press},
address = {Oxford},
abstract = {Recent years have seen a significant expansion in research on computational models of human emotional processes, driven both by their potential for basic research on emotion and cognition as well as their promise for an ever increasing range of applications. This has led to a truly interdisciplinary, mutually beneficial partnership between emotion research in psychology and computational science, of which this volume is an exemplar. To understand this partnership and its potential for transforming existing practices in emotion research across disciplines and for disclosing important novel areas of research, we explore in this chapter the history of work in computational models of emotion including the various uses to which they have been put, the theoretical traditions that have shaped their development, and how these uses and traditions are reflected in their underlying architectures.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Si, Mei; Marsella, Stacy C.; Pynadath, David V.
Directorial Control in a Decision-Theoretic Framework for Interactive Narrative Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, Guimarães, Portugal, 2009.
@inproceedings{si_directorial_2009,
title = {Directorial Control in a Decision-Theoretic Framework for Interactive Narrative},
author = {Mei Si and Stacy C. Marsella and David V. Pynadath},
url = {http://ict.usc.edu/pubs/Directorial%20Control%20in%20a%20Decision-Theoretic%20Framework%20for%20Interactive%20Narrative.pdf},
year = {2009},
date = {2009-12-01},
booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
address = {Guimarães, Portugal},
abstract = {Computer aided interactive narrative has received increasing attention in recent years. Automated directorial control that manages the development of the story in the face of user interaction is an important aspect of interactive narrative design. Most existing approaches lack an explicit model of the user. This limits the approaches' ability of predicting the user's experience, and hence undermines the effectiveness of the approaches. Thespian is a multi-agent framework for authoring and simulating interactive narratives with explicit models of the user. This work extends Thespian with the ability to provide proactive directorial control using the user model. In this paper, we present the algorithms in detail, followed by examples.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; Marsella, Stacy C.; Prendinger, Helmut; Neviarouskaya, Alena
Learning a Model of Speaker Head Nods using Gesture Corpora Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Budapest, Hungary, 2009.
@inproceedings{lee_learning_2009-1,
title = {Learning a Model of Speaker Head Nods using Gesture Corpora},
author = {Jina Lee and Stacy C. Marsella and Helmut Prendinger and Alena Neviarouskaya},
url = {http://ict.usc.edu/pubs/learning%20a%20model%20of%20speaker%20head%20nods.pdf},
year = {2009},
date = {2009-10-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Budapest, Hungary},
abstract = {During face-to-face conversation, the speaker's head is continually in motion. These movements serve a variety of important communicative functions, and may also be influenced by our emotions. The goal for this work is to build a domain-independent model of speaker's head movements and investigate the effect of using affective information during the learning process. Once the model is learned, it can later be used to generate head movements for virtual agents. In this paper, we describe our machine-learning approach to predict speaker's head nods using an annotated corpora of face-to-face human interaction and emotion labels generated by an affect recognition model. We describe the feature selection process, training process, and the comparison of results of the learned models under varying conditions. The results show that using affective information can help predict head nods better than when no affective information is used.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; Marsella, Stacy C.
Learning Models of Speaker Head Nods with Affective Information Proceedings Article
In: The 3rd International Conference on Affective Computing and Intelligent Interaction (ACII 2009), Amsterdam, The Netherlands, 2009.
@inproceedings{lee_learning_2009,
title = {Learning Models of Speaker Head Nods with Affective Information},
author = {Jina Lee and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Learning%20Models%20of%20Speaker%20Head%20Nods%20with%20Affective%20Information.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {The 3rd International Conference on Affective Computing and Intelligent Interaction (ACII 2009)},
address = {Amsterdam, The Netherlands},
abstract = {During face-to-face conversation, the speaker's head is continually in motion. These movements serve a variety of important communicative functions, and may also be influenced by our emotions. The goal for this work is to build a domain-independent model of speaker's head movements and investigate the effect of using affective information during the learning process. Once the model is learned, it can later be used to generate head movements for virtual agents. In this paper, we describe our machine-learning approach to predict speaker's head nods using an annotated corpora of face-to-face human interaction and emotion labels generated by an affect recognition model. We describe the feature selection process, training process, and the comparison of results of the learned models under varying conditions. The results show that using affective information can help predict head nods better than when no affective information is used.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Wang, Ning
Assessing the validity of a computational model of emotional coping Proceedings Article
In: International Conference on Affective Computing and Intelligent Interaction, Amsterdam, The Netherlands, 2009.
@inproceedings{marsella_assessing_2009,
title = {Assessing the validity of a computational model of emotional coping},
author = {Stacy C. Marsella and Jonathan Gratch and Ning Wang},
url = {http://ict.usc.edu/pubs/Assessing%20the%20validity%20of%20a%20computational%20model%20of%20emotional%20coping.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {International Conference on Affective Computing and Intelligent Interaction},
address = {Amsterdam, The Netherlands},
abstract = {In this paper we describe the results of a rigorous empirical study evaluating the coping responses of a computational model of emotion. We discuss three key kinds of coping, Wishful Thinking, Resignation and Distancing that impact an agent's beliefs, intentions and desires, and compare these coping responses to related work in the attitude change literature. We discuss the EMA computational model of emotion and identify several hypotheses it makes concerning these coping processes. We assess these hypotheses against the behavior of human subjects playing a competitive board game, using monetary gains and losses to induce emotion and coping. Subject's appraisals, emotional state and coping responses were indexed at key points throughout a game, revealing a pattern of subject's altering their beliefs, desires and intentions as the game unfolds. The results clearly support several of the hypotheses on coping responses but also identify (a) extensions to how EMA models Wishful Thinking as well as (b) individual differences in subject's coping responses.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Lee, Jina
Predicting Speaker Head Nods and the Effects of Affective Information Proceedings Article
In: 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops, 2009.
@inproceedings{marsella_predicting_2009,
title = {Predicting Speaker Head Nods and the Effects of Affective Information},
author = {Stacy C. Marsella and Jina Lee},
url = {http://ict.usc.edu/pubs/Predicting%20Speaker%20Head%20Nods%20and%20the%20Effects%20of%20Affective%20Information.pdf},
year = {2009},
date = {2009-09-01},
booktitle = {3rd International Conference on Affective Computing and Intelligent Interaction and Workshops},
volume = {12},
abstract = {During face-to-face conversation, our body is continually in motion with various head, gesture, and posture movements. Based on findings of the communicative functions served by these nonverbal behaviors, many virtual agent systems have modeled them to make the virtual agent look more effective and believable. One channel of nonverbal behaviors that has received less attention is head movements, despite the important functions served by them. The goal for this work is to build a domain-independent model of speaker's head movements that could be used to generate head movements for virtual agents. In this paper, we present a machine learning approach for learning models of head movements by focusing on when speaker head nods should occur and conduct evaluation studies that compare the nods generated by this work to our previous approach of using hand-crafted rules},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Gordon, Andrew S.; Lane, H. Chad; Pynadath, David V.
UrbanSim: A Game-based Simulation for Counterinsurgency and Stability-focused Operations Proceedings Article
In: Workshop on Intelligent Educational Games, 14th International Conference on Artificial Intelligence in Education, Brighton, UK, 2009.
@inproceedings{mcalinden_urbansim_2009,
title = {UrbanSim: A Game-based Simulation for Counterinsurgency and Stability-focused Operations},
author = {Ryan McAlinden and Andrew S. Gordon and H. Chad Lane and David V. Pynadath},
url = {http://ict.usc.edu/pubs/UrbanSim-%20A%20Game-based%20Simulation%20for%20Counterinsurgency%20and%20Stability-focused%20Operations.pdf},
year = {2009},
date = {2009-07-01},
booktitle = {Workshop on Intelligent Educational Games, 14th International Conference on Artificial Intelligence in Education},
address = {Brighton, UK},
abstract = {The UrbanSim Learning Package is a simulation-based training application designed for the U.S. Army to develop commanders' skills for conducting counterinsurgency operations. UrbanSim incorporates multiple artificial intelligence (AI) technologies in order to provide an effective training experience, three of which are described in this paper. First, UrbanSim simulates the mental attitudes and actions of groups and individuals in an urban environment using the PsychSim reasoning engine. Second, UrbanSim interjects narrative elements into the training experience using a case-based story engine, driven by non-fiction stories told by experienced commanders. Third, UrbanSim provides intelligent tutoring using a simulation-based method for eliciting and evaluating learner decisions. UrbanSim represents a confluence of AI techniques that seek to bridge the gap between basic research and deployed AI systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.