Publications
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Toward a Neural-Symbolic Sigma: Introducing Neural Network Learning Proceedings Article
In: Proceedings of the 15th Annual Meeting of the International Conference on Cognitive Modelling, EasyChair, Coventry, United Kingdom, 2017.
@inproceedings{rosenbloom_toward_2017,
title = {Toward a Neural-Symbolic Sigma: Introducing Neural Network Learning},
author = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
url = {http://cs.usc.edu/~rosenblo/Pubs/ESNNL%20D.pdf},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the 15th Annual Meeting of the International Conference on Cognitive Modelling},
publisher = {EasyChair},
address = {Coventry, United Kingdom},
abstract = {Building on earlier work extending Sigma’s mixed (symbols + probabilities) graphical band to inference in feedforward neural networks, two forms of neural network learning – target propagation and backpropagation – are introduced, bringing Sigma closer to a full neural-symbolic architecture. Adapting Sigma’s reinforcement learning (RL) capability to use neural networks in policy learning then yields a hybrid form of neural RL with probabilistic action modeling.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS), IWSDS, Farmington, PA, 2017.
@inproceedings{lycan_direct_2017,
title = {Direct and Mediated Interaction with a Holocaust Survivor},
author = {Bethany Lycan and Ron Artstein},
url = {http://www.uni-ulm.de/fileadmin/website_uni_ulm/iui.iwsds2017/papers/IWSDS2017_paper_13.pdf},
year = {2017},
date = {2017-06-01},
booktitle = {Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS)},
publisher = {IWSDS},
address = {Farmington, PA},
abstract = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Ultes, Stefan; Rojas-Barahona, Lina; Pincus, Eli; Traum, David; Eskenazi, Maxine
An Assessment Framework for DialPort Proceedings Article
In: Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS), IWSDS, Farmington, PA, 2017.
@inproceedings{lee_assessment_2017,
title = {An Assessment Framework for DialPort},
author = {Kyusong Lee and Tiancheng Zhao and Stefan Ultes and Lina Rojas-Barahona and Eli Pincus and David Traum and Maxine Eskenazi},
url = {https://www.uni-ulm.de/fileadmin/website_uni_ulm/iui.iwsds2017/papers/IWSDS2017_paper_1.pdf},
year = {2017},
date = {2017-06-01},
booktitle = {Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS)},
publisher = {IWSDS},
address = {Farmington, PA},
abstract = {Collecting a large amount of real human-computer interaction data in various domains is a cornerstone in the development of better data-driven spoken dialog systems. The DialPort project is creating a portal to collect a constant stream of real user conversational data on a variety of topics. In order to keep real users attracted to DialPort, it is crucial to develop a robust evaluation framework to monitor and maintain high performance. Different from earlier spoken dialog systems, DialPort has a heterogeneous set of spoken dialog systems gathered under one outward-looking agent. In order to assess this new structure, we have identified some unique challenges that DialPort will encounter so that it can appeal to real users and have created a novel evaluation scheme that quantitatively assesses their performance in these situations. We look at assessment from the point of view of the system developer as well as that of the end user.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Novick, David
Building rapport with extraverted and introverted agents Proceedings Article
In: Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS), IWSDS, Farmington, PA, 2017.
@inproceedings{brixey_building_2017,
title = {Building rapport with extraverted and introverted agents},
author = {Jacqueline Brixey and David Novick},
url = {https://www.uni-ulm.de/fileadmin/website_uni_ulm/iui.iwsds2017/papers/IWSDS2017_paper_8.pdf},
year = {2017},
date = {2017-06-01},
booktitle = {Proceedings of the International Workshop on Spoken Dialogue Systems (IWSDS)},
publisher = {IWSDS},
address = {Farmington, PA},
abstract = {Psychology research reports that people tend to seek companionship with those who have a similar level of extraversion, and markers in dialogue show the speaker’s extraversion. Work in human-computer interaction seeks to understand how rapport is created and maintained between humans and embodied conversational agents (ECAs). This study examines whether humans report greater rapport when interacting with an agent whose extraversion/introversion profile is similar to their own. ECAs representing an extravert and an introvert were created by manipulating three dialogue features. In an informal, task-oriented setting, participants interacted with one of the agents in an immersive environment. Results suggest that subjects did not report the greatest rapport when interacting with the agent most similar to their own level of extraversion.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron
Inter-annotator Agreement Book Section
In: Handbook of Linguistic Annotation, pp. 297–313, Springer Netherlands, Dordrecht, Netherlands, 2017, ISBN: 978-94-024-0879-9, 978-94-024-0881-2.
@incollection{artstein_inter-annotator_2017,
title = {Inter-annotator Agreement},
author = {Ron Artstein},
url = {http://link.springer.com/10.1007/978-94-024-0881-2_11},
doi = {10.1007/978-94-024-0881-2_11},
isbn = {978-94-024-0879-9, 978-94-024-0881-2},
year = {2017},
date = {2017-06-01},
booktitle = {Handbook of Linguistic Annotation},
pages = {297–313},
publisher = {Springer Netherlands},
address = {Dordrecht, Netherlands},
abstract = {This chapter touches upon several issues in the calculation and assessment of interannotator agreement. It gives an introduction to the theory behind agreement coefficients and examples of their application to linguistic annotation tasks. Specific examples explore variation in annotator performance due to heterogeneous data, complex labels, item difficulty, and annotator differences, showing how global agreement coefficients may mask these sources of variation, and how detailed agreement studies can give insight into both the annotation process and the nature of the underlying data. The chapter also reviews recent work on using machine learning to exploit the variation among annotators and learn detailed models from which accurate labels can be inferred. I therefore advocate an approach where agreement studies are not used merely as a means to accept or reject a particular annotation scheme, but as a tool for exploring patterns in the data that are being annotated.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin; Karumbaiah, Shamya; Tokel, S. Tugba; Core, Mark G.; Stratou, Giota; Auerbach, Daniel; Georgila, Kallirroi
Analyzing Learner Affect in a Scenario-Based Intelligent Tutoring System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 544–547, Springer, Wuhan, China, 2017, ISBN: 978-3-319-61425-0.
@inproceedings{nye_analyzing_2017,
title = {Analyzing Learner Affect in a Scenario-Based Intelligent Tutoring System},
author = {Benjamin Nye and Shamya Karumbaiah and S. Tugba Tokel and Mark G. Core and Giota Stratou and Daniel Auerbach and Kallirroi Georgila},
url = {https://link.springer.com/chapter/10.1007/978-3-319-61425-0_60},
doi = {10.1007/978-3-319-61425-0_60},
isbn = {978-3-319-61425-0},
year = {2017},
date = {2017-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
pages = {544–547},
publisher = {Springer},
address = {Wuhan, China},
abstract = {Scenario-based tutoring systems influence affective states due to two distinct mechanisms during learning: 1) reactions to performance feedback and 2) responses to the scenario context or events. To explore the role of affect and engagement, a scenario-based ITS was instrumented to support unobtrusive facial affect detection. Results from a sample of university students showed relatively few traditional academic affective states such as confusion or frustration, even at decision points and after poor performance (e.g., incorrect responses). This may show evidence of "over-flow," with a high level of engagement and interest but insufficient confusion/disequilibrium for optimal learning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Rosenberg, Evan Suma; Shapiro, Ari
Just-in-time, viable, 3D avatars from scans Journal Article
In: Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents), vol. 28, no. 3-4, 2017.
@article{feng_just--time_2017,
title = {Just-in-time, viable, 3D avatars from scans},
author = {Andrew Feng and Evan Suma Rosenberg and Ari Shapiro},
url = {http://onlinelibrary.wiley.com/doi/10.1002/cav.1769/epdf},
doi = {10.1002/cav.1769},
year = {2017},
date = {2017-05-01},
journal = {Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents)},
volume = {28},
number = {3-4},
abstract = {We demonstrate a system that can generate a photorealistic, interactive 3-D character from a human subject that is capable of movement, emotion, speech, and gesture in less than 20 min, through a near-automatic process that requires no 3-D artist intervention or specialized technical knowledge. Our method uses mostly commodity or off-the-shelf hardware. We demonstrate the just-in-time use of generating such 3-D models for virtual and augmented reality, games, simulation, and communication. We anticipate that the inexpensive generation of such photorealistic models will be useful in many venues where a just-in-time 3-D reconstruction of digital avatars that resemble particular human subjects is necessary.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chollet, Mathieu; Scherer, Stefan
Assessing Public Speaking Ability from Thin Slices of Behavior Proceedings Article
In: Proceedings of the 12th IEEE Conference on Automatic Face and Gesture Recognition (FG 2017), pp. 310–316, IEEE, Washington, DC, 2017, ISBN: 978-1-5090-4023-0.
@inproceedings{chollet_assessing_2017,
title = {Assessing Public Speaking Ability from Thin Slices of Behavior},
author = {Mathieu Chollet and Stefan Scherer},
url = {http://ieeexplore.ieee.org/document/7961757/},
doi = {10.1109/FG.2017.45},
isbn = {978-1-5090-4023-0},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 12th IEEE Conference on Automatic Face and Gesture Recognition (FG 2017)},
pages = {310–316},
publisher = {IEEE},
address = {Washington, DC},
abstract = {An important aspect of public speaking is delivery, which consists of the appropriate use of non-verbal cues to strengthen the message. Recent works have successfully predicted ratings of public speaking delivery aspects using the entire presentations of speakers. However, in other contexts, such as the assessment of personality or the prediction of job interview outcomes, it has been shown that thin slices, brief excerpts of behavior, provide enough information for raters to make accurate predictions. In this paper, we consider the use of thin slices for predicting ratings of public speaking behavior. We use a publicly available corpus of public speaking presentations and obtain ratings of full videos and thin slices. We first study how thin slice ratings are related to full video ratings. Then, we use automatic audio-visual feature extraction methods and machine learning algorithms to create models for predicting public speaking ratings, and evaluate these models for predicting thin slice ratings and full video ratings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; DeVault, David
Towards An Autonomous Agent that Provides Automated Feedback on Students' Negotiation Skills Proceedings Article
In: Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, pp. 410–418, International Foundation for Autonomous Agents and Multiagent Systems, Sao Paulo, Brazil, 2017.
@inproceedings{johnson_towards_2017,
title = {Towards An Autonomous Agent that Provides Automated Feedback on Students' Negotiation Skills},
author = {Emmanuel Johnson and Jonathan Gratch and David DeVault},
url = {http://dl.acm.org/citation.cfm?id=3091187},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems},
pages = {410–418},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Sao Paulo, Brazil},
abstract = {Although negotiation is an integral part of daily life, most people are unskilled negotiators. To improve one's skill set, a range of costly options including self-study guides, courses, and training programs are offered by various companies and educational institutions. For those who can't afford costly training options, virtual role playing agents offer a low-cost alternative. To be effective, these systems must allow students to engage in experiential learning exercises and provide personalized feedback on the learner's performance. In this paper, we show how a number of negotiation principles can be formalized and quantified. We then establish the pedagogical relevance of several automatic metrics, and show that these metrics are significantly correlated with negotiation outcomes in a human-agent negotiation. This illustrates the realism and helps to validate these principles. It also shows the potential of technology being used to quantify feedback that is traditionally provided through more qualitative approaches. The metrics we describe can provide students with personalized feedback on the errors they make in a negotiation exercise and thereby support guided experiential learning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Traum, David; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Listen to My Body: Does Making Friends Help Influence People? Proceedings Article
In: Proceedings of the 30th International Florida Artificial Intelligence Research Society Conference (FLAIRS-30), AAAI, Marco Island, Florida, 2017.
@inproceedings{artstein_listen_2017,
title = {Listen to My Body: Does Making Friends Help Influence People?},
author = {Ron Artstein and David Traum and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {https://aaai.org/ocs/index.php/FLAIRS/FLAIRS17/paper/view/15501/14979},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 30th International Florida Artificial Intelligence Research Society Conference (FLAIRS-30)},
publisher = {AAAI},
address = {Marco Island, Florida},
abstract = {We investigate the effect of relational dialogue on creating rapport and exerting social influence in human-robot conversation, by comparing interactions with and without a relational component, and with different agent types. Human participants interact with two agents – a Nao robot and a virtual human – in four dialogue scenarios: one involving building familiarity, and three involving sharing information and persuasion in item-ranking tasks. Results show that both agents influence human decision-making; people prefer interacting with the robot, feel higher rapport with the robot, and believe the robot has more influence; and that objective influence of the agent on the person is increased by building familiarity, but is not significantly different between the agents.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Auerbach, Daniel; Mehta, Tirth R.; Hartholt, Arno
Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress) Proceedings Article
In: Proceedings of the GIFTSym5, pp. 23–35, ARL, Orlando, Florida, 2017.
@inproceedings{nye_building_2017,
title = {Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress)},
author = {Benjamin D. Nye and Daniel Auerbach and Tirth R. Mehta and Arno Hartholt},
url = {https://books.google.com/books?id=PwMtDwAAQBAJ&printsec=copyright&source=gbs_pub_info_r#v=onepage&q&f=false},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the GIFTSym5},
pages = {23–35},
publisher = {ARL},
address = {Orlando, Florida},
abstract = {As intelligent tutoring systems (ITS) increasingly need to interoperate and co-exist, emerging systems have transitioned toward service-oriented designs to enable modularity and composability of tutoring components made and/or maintained by different research and development groups. However, as a research community, we have still not reached a point where it is trivial for a new service to be added into a system like the Generalized Intelligent Framework for Tutoring (GIFT; Sottilare, Goldberg, Brawner, & Holden, 2012). In an early paper considering this issue with respect to the GIFT architecture (Nye & Morrison, 2013), we proposed addressing this issue by building toward a lightweight multi-agent architecture where certain services act as autonomous agents: “a system situated within and a part of an environment that senses that environment and acts on it, over time, in pursuit of its own agenda and so as to affect what it senses in the future” (Franklin & Graesser, 1997; p. 25). In our work in progress described here, we discuss how we are approaching the opportunity to build such capabilities into GIFT. The high level goals of our work are targeting two core goals for GIFT: A) to be a lightweight framework that will expand access to and use of ITS and B) to help GIFT to increase the intelligence and effectiveness of its services based on data over time. We are currently targeting the first goal, which will underpin the second goal. However, what does it mean to be a lightweight framework? In this context, a “lightweight framework” is framed as minimizing the following criteria: (1) hardware requirements, (2) software expertise to design services, (3) software expertise to use existing services, (4) software expertise to stand up the message-passing layer between agents, and (5) a minimal working message ontology (Nye & Morrison, 2013). Since our original paper four years ago, GIFT has made significant strides in reducing barriers related to hardware by building a cloud-based version and software expertise to use GIFT services through authoring tools. It has also developed a growing ontology of messages (e.g., https://gifttutoring.org/projects/gift/wiki/Interface_Control_Document_2016-1). With that said, despite now-extensive documentation, designing new services for GIFT is still not trivial and strong expertise is required to pass messages between GIFT modules and agents (either internal or external). To address these issues, the Building a Backbone project is working toward agent-oriented designs that build on GIFT's existing service-oriented framework. By moving from services toward agents, modules will be able to act more autonomously, enabling capabilities such as plug-and-play, hot-swapping, and selecting between multiple services providing the same capabilities. These new capabilities are intended to reduce barriers to building new GIFT-compatible services and also to integrating GIFT with other service-oriented ecosystems. The first steps toward these capabilities are an ontology mapping service and an initial integration that combines GIFT, the Virtual Human Toolkit core framework for agents, and the SuperGLU framework for adding agent-oriented capabilities for coordinating services. This paper reports on work to date, with an emphasis on target capabilities, design decisions, challenges, and open research questions for this work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Laird, John E.; Lebiere, Christian
Précis of ‘A Standard Model of the Mind’ Proceedings Article
In: Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems, Cognitive Systems Foundation, Troy, New York, 2017.
@inproceedings{rosenbloom_precis_2017,
title = {Précis of ‘A Standard Model of the Mind’},
author = {Paul S. Rosenbloom and John E. Laird and Christian Lebiere},
url = {http://cs.usc.edu/~rosenblo/Pubs/SM%20ACS%202017%20D.pdf},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems},
publisher = {Cognitive Systems Foundation},
address = {Troy, New York},
abstract = {A standard model captures a community consensus over a coherent region of science, such as particle physics. Here we summarize the key points from a longer article (Laird, Lebiere & Rosenbloom, 2017) that proposes developing such a model for human-like minds.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan
Grumpy & Pinocchio: Answering Human-Agent Negotiation Questions through Realistic Agent Design Proceedings Article
In: Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, pp. 401–409, International Foundation for Autonomous Agents and Multiagent Systems, Sao Paulo, Brazil, 2017.
@inproceedings{mell_grumpy_2017,
title = {Grumpy & Pinocchio: Answering Human-Agent Negotiation Questions through Realistic Agent Design},
author = {Johnathan Mell and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=3091186},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems},
pages = {401–409},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Sao Paulo, Brazil},
abstract = {We present the Interactive Arbitration Guide Online (IAGO) platform, a tool for designing human-aware agents for use in negotiation. Current state-of-the-art research platforms are ideally suited for agent-agent interaction. While helpful, these often fail to address the reality of human negotiation, which involves irrational actors, natural language, and deception. To illustrate the strengths of the IAGO platform, the authors describe four agents which are designed to showcase the key design features of the system. We go on to show how these agents might be used to answer core questions in human-centered computing, by reproducing classical human-human negotiation results in a 2x2 human-agent study. The study presents results largely in line with expectations of human-human negotiation outcomes, and helps to demonstrate the validity and usefulness of the IAGO platform.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Increasing Fairness by Delegating Decisions to Autonomous Agents Proceedings Article
In: Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, pp. 419–425, International Foundation for Autonomous Agents and Multiagent Systems, Sao Paulo, Brazil, 2017.
@inproceedings{de_melo_increasing_2017,
title = {Increasing Fairness by Delegating Decisions to Autonomous Agents},
author = {Celso M. Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=3091188},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems},
pages = {419–425},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Sao Paulo, Brazil},
abstract = {There has been growing interest in autonomous agents that act on our behalf, or represent us, across various domains such as negotiation, transportation, health, finance, and defense. As these agent representatives become immersed in society, it is critical we understand whether and, if so, how they disrupt the traditional patterns of interaction with others. In this paper, we study how programming agents to represent us shapes our decisions in social settings. Here we show that, when acting through agent representatives, people are considerably less likely to accept unfair offers from others, when compared to direct interaction with others. This result, thus, demonstrates that agent representatives have the potential to promote fairer outcomes. Moreover, we show that this effect can also occur when people are asked to “program” human representatives, thus revealing that the act of programming itself can promote fairer behavior. We argue this happens because programming requires the programmer to deliberate on all possible situations that might arise and, thus, promotes consideration of social norms – such as fairness – when making decisions. These results have important theoretical, practical, and ethical implications for the design of agent representatives and for the nature of people's decision making when they act through agents on their behalf.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Stratou, Giota; Gratch, Jonathan
Incorporating Emotion Perception into Opponent Modeling for Social Dilemmas Proceedings Article
In: Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, pp. 801–809, International Foundation for Autonomous Agents and Multiagent Systems, Sao Paulo, Brazil, 2017.
@inproceedings{hoegen_incorporating_2017,
title = {Incorporating Emotion Perception into Opponent Modeling for Social Dilemmas},
author = {Rens Hoegen and Giota Stratou and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=3091239},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems},
pages = {801–809},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Sao Paulo, Brazil},
abstract = {Many everyday decisions involve a social dilemma: cooperation can enhance joint gains, but also make one vulnerable to exploitation. Emotion and emotional signaling is an important element of how people resolve these dilemmas. With the rise of affective computing, emotion is also an important element of how people resolve these dilemmas with machines. In this article, we learn a predictive model of how people make decisions in an iterative social dilemma. We further show that model accuracy improves by incorporating a player's emotional displays as input to this model, and provide some insight into which emotions influence social decisions. Finally, we show how this model can be used to perform "social planning": i.e., to generate a sequence of actions and expressions that achieve social goals (such as maximizing individual rewards). These techniques can be used to enhance machine-understanding of human behavior, as social decision-aids, or to drive the actions of virtual and robotic agents.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Narang, Sahil; Best, Andrew; Feng, Andrew; Kang, Sin-hwa; Manocha, Dinesh; Shapiro, Ari
Motion recognition of self and others on realistic 3D avatars Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 15464261.
@article{narang_motion_2017,
title = {Motion recognition of self and others on realistic 3D avatars},
author = {Sahil Narang and Andrew Best and Andrew Feng and Sin-hwa Kang and Dinesh Manocha and Ari Shapiro},
url = {http://onlinelibrary.wiley.com/doi/10.1002/cav.1762/epdf},
doi = {10.1002/cav.1762},
issn = {15464261},
year = {2017},
date = {2017-05-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {Current 3D capture and modeling technology can rapidly generate highly photorealistic 3D avatars of human subjects. However, while the avatars look like their human counterparts, their movements often do not mimic their own due to existing challenges in accurate motion capture and retargeting. A better understanding of factors that influence the perception of biological motion would be valuable for creating virtual avatars that capture the essence of their human subjects. To investigate these issues, we captured 22 subjects walking in an open space. We then performed a study where participants were asked to identify their own motion in varying visual representations and scenarios. Similarly, participants were asked to identify the motion of familiar individuals. Unlike prior studies that used captured footage with simple “point-light” displays, we rendered the motion on photo-realistic 3D virtual avatars of the subject. We found that self-recognition was significantly higher for virtual avatars than with point-light representations. Users were more confident of their responses when identifying their motion presented on their virtual avatar. Recognition rates varied considerably between motion types for recognition of others, but not for self-recognition. Overall, our results are consistent with previous studies that used recorded footage and offer key insights into the perception of motion rendered on virtual avatars.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Scherer, Stefan
The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment Proceedings Article
In: Proceedings of the Preconference on Affective Computing at the Society for Affective Science, Boston, MA, 2017.
@inproceedings{neubauer_effects_2017,
title = {The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment},
author = {Catherine Neubauer and Stefan Scherer},
url = {http://ict.usc.edu/pubs/The%20Effects%20of%20Pre-task%20Team%20Collaboration%20on%20Facial%20Expression%20and%20Speech%20Entrainment.pdf},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the Preconference on Affective Computing at the Society for Affective Science},
address = {Boston, MA},
abstract = {Many everyday tasks are complex and require the coordination of one or more individuals. Such tasks can be relatively simple like passing a ball to a friend during a game of catch, while others are more complex such as performing a life-saving surgery where surgeons, anesthesiologists and nurses all work together in a multi-person team [1]. Such coordination requires the appropriate allocation of cognitive and behavioral effort to meet the changing demands of their environment and cannot be completed alone [1]. These mutually cooperative behaviors can include team communication, body position and even affective cues [2]. Some behaviors are explicitly controlled to be coordinated [3] (e.g., when an individual purposely attempts to follow the behaviors of their teammate or team leader), while others are implicit or unconscious. Presently, these shared behaviors have been referred to as entrainment [4] [5], mimicry [6] [7] and even action matching [8] [9]; however, the specific term used typically refers to the underlying theoretical cause for the phenomenon. Theoretically, entrainment can be explained as the spontaneous interpersonal coupling that occurs because the behavior of one or more individuals is affected by another’s behavior in a closed loop system. Additionally, such behavior is typically evident when working on a mutual, goal-directed task [10]. Therefore, for the purposes of this paper we will refer to the cooperative behaviors between teammates that support problem solving as entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manini, Barbara; Tsui, Katherine; Stone, Adam; Scassellati, Brian; Traum, David; Merla, Arcangelo; Petitto, Laura Ann
Physiological and behavioral correlates of babies’ social engagement with robot and virtual human artificial intelligence agents Proceedings Article
In: Proceedings of SRCD, Austin, TX, 2017.
@inproceedings{manini_physiological_2017,
title = {Physiological and behavioral correlates of babies’ social engagement with robot and virtual human artificial intelligence agents},
author = {Barbara Manini and Katherine Tsui and Adam Stone and Brian Scassellati and David Traum and Arcangelo Merla and Laura Ann Petitto},
url = {https://www.researchgate.net/publication/316167858_Physiological_and_behavioral_correlates_of_babies'_social_engagement_with_robot_and_virtual_human_artificial_intelligence_agents},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of SRCD},
address = {Austin, TX},
abstract = {Exposure to the patterns of natural language in early life—especially in ways that are rich in socially contingent interaction and conversation—is among the most powerful facilitators of the human language acquisition process (Petitto et al., 2016). Adults’ infant-directed language (e.g., simple rhythmic nursery rhymes), communicated in social interactions with joint attention, supports babies’ biological predisposition to language development in the first year of life (Brook & Meltzoff, 2015). Yet many babies have minimal language exposure in early life that can have devastating consequences for their language learning and reading success—such as the deaf baby. With the aim to develop a learning tool for babies deprived of natural language input during sensitive periods in human development, we studied whether artificial intelligent agents (social robots and virtual humans) can serve as an augmentative communicative partner in early infancy. Using innovative thermal IR imaging technology, we recorded, imaged, and analyzed infants’ emotional arousal and behavioral responses during social interactions with a robot and virtual human, as compared with a real human. We asked whether babies’ physiological and behavioral responses of joint attention during these robot and virtual human interactions were similar to or different from interactions with a real human. We hypothesized that if baby–artificial agent emotional arousal measures were observed to be similar to humans, then artificial agents may potentially serve as a promising tool in facilitating language learning in infants with early-life minimal language exposure. Methods: 10 hearing (nonsigning) infants (five 6-9mths; five 9-12mths). Following Meltzoff et al. (2010), after a brief familiarization period with the robot, infants participated in 6–10 episodes of robot head and eye gaze turning (left or right). Two screens were placed on each side of the robot, rendering it “looking at the screen” when it turned its head. Contiguous with the robot’s gaze/head, both screens showed a nursery rhyme in ASL, performed alternatively by a virtual human or a real human (held constant: physical features and linguistic content). Results: Time-locked/integrated infant behavior and thermal responses were analyzed (cf. Merla, 2004; Manini et al., 2013). (1) Behavioral data showed babies followed robot gaze, yet the Thermal IR data added new insights: Significant increase in nasal-tip temperature was observed, indicative of suppression of the sympathetic activity and increase of parasympathetic/pro-social attentiveness. (2) Thermal responses with virtual human vs real human revealed a phasic decrease of temperature likely associated with increased vigilance and higher cognitive attention processes (e.g., match-mismatch analysis). Discussion: Robots and virtual humans may be effective as augmentative communicative partners for young babies. Novel here, we observed an integrated physiological and behavioral response of joint attention and social engagement during babies’ interaction with the robot. Moreover, the virtual human elicited a peaked attentional arousal reaction, which may be indicative of linguistic stimuli detection and/or a “readiness to learn.” The integration of physiological and behavioral responses provides insights that pave the way for groundbreaking applications in the field of artificial intelligence (Merla, 2014) and augmentative learning tools that promote language acquisition in young children.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Morency, Louis-Philippe
MultiSense—Context-Aware Nonverbal Behavior Analysis Framework: A Psychological Distress Use Case Journal Article
In: IEEE Transactions on Affective Computing, vol. 8, no. 2, pp. 190–203, 2017, ISSN: 1949-3045.
@article{stratou_multisensecontext-aware_2017,
title = {MultiSense—Context-Aware Nonverbal Behavior Analysis Framework: A Psychological Distress Use Case},
author = {Giota Stratou and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/document/7579221/},
doi = {10.1109/TAFFC.2016.2614300},
issn = {1949-3045},
year = {2017},
date = {2017-04-01},
journal = {IEEE Transactions on Affective Computing},
volume = {8},
number = {2},
pages = {190–203},
abstract = {During face-to-face interactions, people naturally integrate nonverbal behaviors such as facial expressions and body postures as part of the conversation to infer the communicative intent or emotional state of their interlocutor. The interpretation of these nonverbal behaviors will often be contextualized by interactional cues such as the previous spoken question, the general discussion topic or the physical environment. A critical step in creating computers able to understand or participate in this type of social face-to-face interactions is to develop a computational platform to synchronously recognize nonverbal behaviors as part of the interactional context. In this platform, information for the acoustic and visual modalities should be carefully synchronized and rapidly processed. At the same time, contextual and interactional cues should be remembered and integrated to better interpret nonverbal (and verbal) behaviors. In this article, we introduce a real-time computational framework, MultiSense, which offers flexible and efficient synchronization approaches for context-based nonverbal behavior analysis. MultiSense is designed to utilize interactional cues from both interlocutors (e.g., from the computer and the human participant) and integrate this contextual information when interpreting nonverbal behaviors. MultiSense can also assimilate behaviors over a full interaction and summarize the observed affective states of the user. We demonstrate the capabilities of the new framework with a concrete use case from the mental health domain where MultiSense is used as part of a decision support tool to assess indicators of psychological distress such as depression and post-traumatic stress disorder (PTSD). In this scenario, MultiSense not only infers psychological distress indicators from nonverbal behaviors but also broadcasts the user state in real-time to a virtual agent (i.e., a digital interviewer) designed to conduct semi-structured interviews with human participants. Our experiments show the added value of our multimodal synchronization approaches and also demonstrate the importance of MultiSense contextual interpretation when inferring distress indicators.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2011
Ozkan, Derya; Scherer, Stefan; Morency, Louis-Philippe
Step-wise Emotion Recognition using Concatenated-HMM Proceedings Article
In: Audio/Visual Emotion Challenge and Workshop (AVEC 2011), Memphis, TN, 2011.
@inproceedings{ozkan_step-wise_2011,
title = {Step-wise Emotion Recognition using Concatenated-HMM},
author = {Derya Ozkan and Stefan Scherer and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Step-wise%20Emotion%20Recognition%20using%20Concatenated-HMM.pdf},
year = {2011},
date = {2011-10-01},
booktitle = {Audio/Visual Emotion Challenge and Workshop (AVEC 2011)},
address = {Memphis, TN},
abstract = {Human emotion is an important part of human-human communication, since the emotional state of an individual often affects the way that he/she reacts to others. In this paper, we present a method based on concatenated Hidden Markov Model (co-HMM) to infer the dimensional and continuous emotion labels from audio-visual cues. Our method is based on the assumption that continuous emotion levels can be modeled by a set of discrete values. Based on this, we represent each emotional dimension by step-wise label classes, and learn the intrinsic and extrinsic dynamics using our co-HMM model. We evaluate our approach on the Audio-Visual Emotion Challenge (AVEC 2012) dataset. Our results show considerable improvement over the baseline regression model presented with the AVEC 2012.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pluss, Brian; DeVault, David; Traum, David
Toward Rapid Development of Multi-Party Virtual Human Negotiation Scenarios Proceedings Article
In: Proceedings of the 15th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2011), Los Angeles, CA, 2011.
@inproceedings{pluss_toward_2011,
title = {Toward Rapid Development of Multi-Party Virtual Human Negotiation Scenarios},
author = {Brian Pluss and David DeVault and David Traum},
url = {http://ict.usc.edu/pubs/Toward%20Rapid%20Development%20of%20Multi-Party%20Virtual%20Human%20Negotiation%20Scenarios.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {Proceedings of the 15th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2011)},
address = {Los Angeles, CA},
abstract = {This paper reports on an ongoing effort to enable the rapid development of multi-party virtual human negotiation scenarios. We present a case study in which a new scenario supporting negotiation between two human role players and two virtual humans was developed over a period of 12 weeks. We discuss the methodology and development process that were employed, from storyline design through role play and iterative development of the virtual humans' semantic and task representations and natural language processing capabilities. We analyze the effort, expertise, and time required for each development step, and discuss opportunities to further streamline the development process.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kulms, Philip; Krämer, Nicole C.; Gratch, Jonathan; Kang, Sin-Hwa
It's in their eyes: A study on female and male virtual humans' gaze Proceedings Article
In: 11th International Conference on Intelligent Virtual Agents (IVA 2011), Reykjavík, Iceland, 2011.
@inproceedings{kulms_its_2011,
title = {It's in their eyes: A study on female and male virtual humans' gaze},
author = {Philip Kulms and Nicole C. Krämer and Jonathan Gratch and Sin-Hwa Kang},
url = {http://ict.usc.edu/pubs/It's%20in%20their%20eyes-%20A%20study%20on%20female%20and%20male%20virtual%20humans'%20gaze.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {11th International Conference on Intelligent Virtual Agents (IVA 2011)},
address = {Reykjavík, Iceland},
abstract = {Social psychological research demonstrates that the same behavior might lead to different evaluations depending on whether it is shown by a man or a woman. With a view to design decisions with regard to virtual humans, it is relevant to test whether this pattern also applies to gendered virtual humans. In a 2x2 between subjects experiment we manipulated the Rapport Agent's gaze behavior and its gender in order to test whether especially female agents are evaluated more negatively when they do not show gender specific immediacy behavior and avoid gazing at the interaction partner. Instead of this interaction effect we found two main effects: gaze avoidance was evaluated negatively and female agents were rated more positively than male agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Gandhe, Sudeep; McCall, Cade; Gratch, Jonathan; Blascovich, James J.; Traum, David
The effects of virtual agent humor and gaze behavior on human-virtual agent proxemics Proceedings Article
In: Proceedings of 11th International Conference on Intelligent Virtual Agents, IVA 2011, Reykjavik, Iceland, 2011.
@inproceedings{khooshabeh_effects_2011,
title = {The effects of virtual agent humor and gaze behavior on human-virtual agent proxemics},
author = {Peter Khooshabeh and Sudeep Gandhe and Cade McCall and Jonathan Gratch and James J. Blascovich and David Traum},
url = {http://ict.usc.edu/pubs/The%20effects%20of%20virtual%20agent%20humor%20and%20gaze%20behavior%20on%20human-virtual%20agent%20proxemics.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {Proceedings of 11th International Conference on Intelligent Virtual Agents, IVA 2011},
address = {Reykjavik, Iceland},
abstract = {We study whether a virtual agent that delivers humor through verbal behavior can affect an individual's proxemic behavior towards the agent. Participants interacted with a virtual agent through natural language and, in a separate task, performed an embodied interpersonal interaction task in a virtual environment. The study used minimum distance as the dependent measure. Humor generated by the virtual agent through a text chat did not have any significant effects on the proxemic task. This is likely due to the experimental constraint of only allowing participants to interact with a disembodied agent through a textual chat dialogue.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Lixing; Morency, Louis-Philippe; Gratch, Jonathan
Virtual Rapport 2.0 Proceedings Article
In: Proceedings of the 11th Conference on Intelligent Virtual Agents, Reykjavík, Iceland, 2011.
@inproceedings{huang_virtual_2011,
title = {Virtual Rapport 2.0},
author = {Lixing Huang and Louis-Philippe Morency and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Virtual%20Rapport%202.0.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {Proceedings of the 11th Conference on Intelligent Virtual Agents},
address = {Reykjavík, Iceland},
abstract = {Rapport, the feeling of being "in sync" with your conversational partners, is argued to underlie many desirable social effects. By generating proper verbal and nonverbal behaviors, virtual humans have been seen to create rapport during interactions with human users. In this paper, we introduce our approach to creating rapport following Tickle-Degnen and Rosenberg's three-factor (positivity, mutual attention and coordination) theory of rapport. By comparing with a previously published virtual agent, the Rapport Agent, we show that our virtual human predicts the timing of backchannel feedback and end-of-turn more precisely, performs more natural behaviors, and thereby creates much stronger feelings of rapport between users and virtual agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Sidner, Candy; Gratch, Jonathan; Artstein, Ron; Huang, Lixing; Morency, Louis-Philippe
Modeling Nonverbal Behavior of a Virtual Counselor during Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 11th Conference on Intelligent Virtual Agents, Reykjavík, Iceland, 2011.
@inproceedings{kang_modeling_2011,
title = {Modeling Nonverbal Behavior of a Virtual Counselor during Intimate Self-Disclosure},
author = {Sin-Hwa Kang and Candy Sidner and Jonathan Gratch and Ron Artstein and Lixing Huang and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Modeling%20Nonverbal%20Behavior%20of%20a%20Virtual%20Counselor%20during%20Intimate%20Self-Disclosure.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {Proceedings of the 11th Conference on Intelligent Virtual Agents},
address = {Reykjavík, Iceland},
abstract = {Humans often share personal information with others in order to create social connections. Sharing personal information is especially important in counseling interactions [2]. Research studying the relationship between intimate self-disclosure and human behavior critically informs the development of virtual agents that create rapport with human interaction partners. One significant example of this application is using virtual agents as counselors in psychotherapeutic situations. The capability of expressing different intimacy levels is key to a successful virtual counselor to reciprocally induce disclosure in clients. Nonverbal behavior is considered critical for indicating intimacy and is important when designing a social virtual agent such as a counselor. One key research question is how to properly express intimate self-disclosure. In this study, our main goal is to find what types of interviewees' nonverbal behavior are associated with different intimacy levels of verbal self-disclosure. Thus, we investigated humans' nonverbal behavior associated with self-disclosure in an interview setting (with intimate topics).},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Aggarwal, Priti; Feeley, Kevin; Morbini, Fabrizio; Artstein, Ron; Leuski, Anton; Traum, David; Kim, Julia
Interactive characters for cultural training of small military units Proceedings Article
In: The 11th International Conference on Intelligent Virtual Agents (IVA 2011), Reykjavik, Iceland, 2011.
@inproceedings{aggarwal_interactive_2011,
title = {Interactive characters for cultural training of small military units},
author = {Priti Aggarwal and Kevin Feeley and Fabrizio Morbini and Ron Artstein and Anton Leuski and David Traum and Julia Kim},
url = {http://ict.usc.edu/pubs/Interactive%20characters%20for%20cultural%20training%20of%20small%20military%20units.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {The 11th International Conference on Intelligent Virtual Agents (IVA 2011)},
address = {Reykjavik, Iceland},
abstract = {CHAOS, the Combat Hunter Action and Observation Simulation, is an immersive simulation training environment which gives small military units the experience of interacting with local Afghan villagers during a patrol. It is a physical build-out of a housing compound in a mock Afghan village, with several life-size reactive and interactive animated Pashto-speaking virtual characters. The exercise requires an infantry squad to locate and interview a character named Omar, communicating through a live human interpreter and attending to proper protocol regarding Omar's family. Character animation and behavior is based on extensive interviews with Afghan experts to provide a realistic setting of the intended locale. The system combines virtual human technology, story engineering, and physical set building to provide a compelling training environment that can handle a full squad, requiring trainees to integrate tasks such as working with an interpreter, dealing with non-English speakers from another culture, and assessing information and disposition to make decisions in a mission context.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Jan, Dusan; Core, Mark; Traum, David
Using virtual tour behavior to build dialogue models for training review Proceedings Article
In: Proceedings of the 11th International Conference on Intelligent Virtual Agents (IVA 2011), Reykjavík, Iceland, 2011.
@inproceedings{roque_using_2011,
title = {Using virtual tour behavior to build dialogue models for training review},
author = {Antonio Roque and Dusan Jan and Mark Core and David Traum},
url = {http://ict.usc.edu/pubs/Using%20virtual%20tour%20behavior%20to%20build%20dialogue%20models%20for%20training%20review.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {Proceedings of the 11th International conference on Intelligent Virtual Agents (IVA 2011)},
address = {Reykjavík, Iceland},
abstract = {We develop an intelligent agent that builds a user model of a learner during a tour of a virtual world. The user model is based on the learner's answers to questions during the tour. A dialogue model for a simulated instructor is tailored to the individual learner based upon this user model. We describe an evaluation to track system accuracy and user perceptions.},
keywords = {Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nouri, Elnaz; Traum, David
A Cultural Decision-Making Model for Virtual Agents Playing Negotiation Games Proceedings Article
In: International Workshop on Culturally Motivated Virtual Characters, Reykjavik, Iceland, 2011.
@inproceedings{nouri_cultural_2011,
title = {A Cultural Decision-Making Model for Virtual Agents Playing Negotiation Games},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/A%20Cultural%20Decision-Making%20Model%20for%20Virtual%20Agents%20Playing%20Negotiation%20Games.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {International Workshop on Culturally Motivated Virtual Characters},
address = {Reykjavik, Iceland},
abstract = {We present a novel model of decision-making in social tasks for virtual humans. The model considers multiple valuations of the available choices in a decision set according to individual and social factors such as own utility, total group utility, and relative utility. Cultural differences are incorporated using Hofstede's dimensional model of culture and affect the decision making process by changing the different weightings of the factors. We have integrated the decision model into the dialogue manager of a virtual human system, and developed protocols and dialogue capabilities to support virtual humans in playing a simple negotiation game (Ultimatum Game). We present evaluations between both a culturally oriented virtual human and a person and between two virtual humans (with different culture models).},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Aggarwal, Priti; Traum, David
The BML Sequencer: A Tool for Authoring Multi-character Animations Proceedings Article
In: 11th International Conference on Intelligent Virtual Agents (IVA 2011), Reykjavík, Iceland, 2011.
@inproceedings{aggarwal_bml_2011,
title = {The BML Sequencer: A Tool for Authoring Multi-character Animations},
author = {Priti Aggarwal and David Traum},
url = {http://ict.usc.edu/pubs/BML%20Sequencer.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {11th International Conference on Intelligent Virtual Agents (IVA 2011)},
address = {Reykjavík, Iceland},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Chance, Eric; Rajpurohit, Dinesh; DeVault, David; Leuski, Anton; Morie, Jacquelyn; Traum, David
Checkpoint Exercise: Training with Virtual Actors in Virtual Worlds Proceedings Article
In: The 11th International Conference on Intelligent Virtual Agents (IVA 2011), Reykjavik, Iceland, 2011.
@inproceedings{jan_checkpoint_2011,
title = {Checkpoint Exercise: Training with Virtual Actors in Virtual Worlds},
author = {Dusan Jan and Eric Chance and Dinesh Rajpurohit and David DeVault and Anton Leuski and Jacquelyn Morie and David Traum},
url = {http://www.ict.usc.edu/pubs/Checkpoint%20Exercise-%20Training%20with%20Virtual%20Actors%20in%20Virtual%20Worlds.pdf},
year = {2011},
date = {2011-09-01},
booktitle = {The 11th International Conference on Intelligent Virtual Agents (IVA 2011)},
address = {Reykjavik, Iceland},
abstract = {We have implemented a checkpoint exercise in Second Life where the user interacts with several computer avatars in a team based activity. We describe the experience and the implementation of our solution and show some evaluation results.},
keywords = {Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Wilson, Cyrus A.; Alexander, Oleg; Tunwattanapong, Borom; Peers, Pieter; Ghosh, Abhijeet; Busch, Jay; Hartholt, Arno; Debevec, Paul
Facial Cartography: Interactive Scan Correspondence Proceedings Article
In: ACM/Eurographics Symposium on Computer Animation, 2011.
@inproceedings{wilson_facial_2011,
title = {Facial Cartography: Interactive Scan Correspondence},
author = {Cyrus A. Wilson and Oleg Alexander and Borom Tunwattanapong and Pieter Peers and Abhijeet Ghosh and Jay Busch and Arno Hartholt and Paul Debevec},
url = {http://ict.usc.edu/pubs/Facial%20Cartography-%20Interactive%20Scan%20Correspondence.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {ACM/Eurographics Symposium on Computer Animation},
abstract = {We present a semi-automatic technique for computing surface correspondences between 3D facial scans in different expressions, such that scan data can be mapped into a common domain for facial animation. The technique can accurately correspond high-resolution scans of widely differing expressions – without requiring intermediate pose sequences – such that they can be used, together with reflectance maps, to create high-quality blendshape-based facial animation. We optimize correspondences through a combination of Image, Shape, and Internal forces, as well as Directable forces to allow a user to interactively guide and refine the solution. Key to our method is a novel representation, called an Active Visage, that balances the advantages of both deformable templates and correspondence computation in a 2D canonical domain. We show that our semi-automatic technique achieves more robust results than automated correspondence alone, and is more precise than is practical with unaided manual input.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Rushforth, Michael; Aggarwal, Priti; Traum, David
Evaluation of an Integrated Authoring Tool for Building Advanced Question-Answering Characters Proceedings Article
In: 12th Annual Conference of the International Speech Communication Association (InterSpeech 2011), Florence, Italy, 2011.
@inproceedings{gandhe_evaluation_2011,
title = {Evaluation of an Integrated Authoring Tool for Building Advanced Question-Answering Characters},
author = {Sudeep Gandhe and Michael Rushforth and Priti Aggarwal and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Integrated%20Authoring%20Tool%20for%20%20Building%20Advanced%20Question-Answering%20Characters.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {12th Annual Conference of the International Speech Communication Association (InterSpeech 2011)},
address = {Florence, Italy},
abstract = {We present the evaluation of an integrated authoring tool for rapid prototyping of dialogue systems. These dialogue systems are designed to support virtual humans engaging in advanced question-answering dialogues, such as for training tactical questioning skills. The tool was designed to help non-experts, who may have little or no knowledge of linguistics or computer science, build virtual characters that can play the role of an interviewee. The tool has been successfully used by several different non-experts to create a number of virtual characters used successfully for both training and human subjects testing. We report on experiences with seven such characters, whose development time was as little as two weeks including concept development and a round of user testing.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Traum, David
Reinforcement Learning of Argumentation Dialogue Policies in Negotiation Proceedings Article
In: The 12th Annual Conference of the International Speech Communication Association (InterSpeech), Florence, Italy, 2011.
@inproceedings{georgila_reinforcement_2011,
title = {Reinforcement Learning of Argumentation Dialogue Policies in Negotiation},
author = {Kallirroi Georgila and David Traum},
url = {http://ict.usc.edu/pubs/Reinforcement%20Learning%20of%20Argumentation%20Dialogue%20Policies%20in%20Negotiation.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {The 12th Annual Conference of the International Speech Communication Association (InterSpeech)},
address = {Florence, Italy},
abstract = {We build dialogue system policies for negotiation, and in particular for argumentation. These dialogue policies are designed for negotiation against users of different cultural norms (individualists, collectivists, and altruists). In order to learn these policies we build simulated users (SUs), i.e. models that simulate the behavior of real users, and use reinforcement learning (RL). The SUs are trained on a spoken dialogue corpus in a negotiation domain, and then tweaked towards a particular cultural norm using hand-crafted rules. We evaluate the learned policies in a simulation setting. Our results are consistent with our SUs, in other words, the policies learn what they are designed to learn, which shows that RL is a promising technique for learning policies in domains, such as argumentation, that are more complex than standard slot-filling applications.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Dehghani, Morteza; Khooshabeh, Peter; Huang, Lixing; Oganesyan, Lia; Gratch, Jonathan
Cultural Frame-Switching using Accented Spoken Language by a Virtual Character Proceedings Article
In: 11th International Conference on Intelligent Virtual Agents (IVA 2011), Reykjavik, Iceland, 2011.
@inproceedings{dehghani_cultural_2011,
title = {Cultural Frame-Switching using Accented Spoken Language by a Virtual Character},
author = {Morteza Dehghani and Peter Khooshabeh and Lixing Huang and Lia Oganesyan and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Cultural%20Frame-Switching%20using%20Accented%20Spoken%20Language%20by%20a%20Virtual%20Character.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {11th International Conference on Intelligent Virtual Agents (IVA 2011)},
address = {Reykjavik, Iceland},
abstract = {In this paper, we examine whether embodied conversational agents can be used to implement socio-cultural markers. We investigate whether the accent of a virtual character, as a marker for culture, can cause cultural frame-shifts in individuals. We report an experiment, performed among bicultural and monocultural individuals, in which we test the above hypothesis. Our results show that a virtual agent can have a socio-cultural effect on people's cognition. This work makes unique contributions to the design and evaluation of intelligent virtual agents as well as the theoretical psychological literature.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Sagae, Kenji; Traum, David
Detecting the Status of a Predictive Incremental Speech Understanding Model for Real-Time Decision-Making in a Spoken Dialogue System Proceedings Article
In: The 12th Conference of the International Speech Communication Association (InterSpeech), Florence, Italy, 2011.
@inproceedings{devault_detecting_2011,
title = {Detecting the Status of a Predictive Incremental Speech Understanding Model for Real-Time Decision-Making in a Spoken Dialogue System},
author = {David DeVault and Kenji Sagae and David Traum},
url = {http://ict.usc.edu/pubs/Detecting%20the%20Status%20of%20a%20Predictive%20Incremental%20Speech%20Understanding%20Model%20for%20Real-Time%20Decision-Making%20in%20a%20Spoken%20Dialogue%20System.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {The 12th Conference of the International Speech Communication Association (InterSpeech)},
address = {Florence, Italy},
abstract = {We explore the potential for a responsive spoken dialogue system to use the real-time status of an incremental speech understanding model to guide its incremental decision-making about how to respond to a user utterance that is still in progress. Spoken dialogue systems have a range of potentially useful real-time response options as a user is speaking, such as providing acknowledgments or backchannels, interrupting the user to ask a clarification question or to initiate the system's response, or even completing the user's utterance at appropriate moments. However, implementing such incremental response capabilities seems to require that a system be able to assess its own level of understanding incrementally, so that an appropriate response can be selected at each moment. In this paper, we use a data-driven classification approach to explore the trade-offs that a virtual human dialogue system faces in reliably identifying how its understanding is progressing during a user utterance.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul
From memory to problem solving: Mechanism reuse in a graphical cognitive architecture Proceedings Article
In: Proceedings of the 4th Conference on Artificial General Intelligence, Mountain View, CA, 2011.
@inproceedings{rosenbloom_memory_2011,
title = {From memory to problem solving: Mechanism reuse in a graphical cognitive architecture},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/From%20memory%20to%20problem%20solving-%20Mechanism%20reuse%20in%20a%20graphical%20cognitive%20architecture.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {Proceedings of the 4th Conference on Artificial General Intelligence},
address = {Mountain View, CA},
abstract = {This article describes the extension of a memory architecture that is implemented via graphical models to include core aspects of problem solving. By extensive reuse of the general graphical mechanisms originally developed to support memory, this demonstrates how a theoretically elegant implementation level can enable increasingly broad architectures without compromising overall simplicity and uniformity. In the process, it bolsters the potential of such an approach for developing the more complete architectures that will ultimately be necessary to support autonomous general intelligence.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Antos, Dimitrios; Melo, Celso M.; Gratch, Jonathan; Grosz, Barbara
The Influence of Emotion Expression on Perceptions of Trustworthiness in Negotiation Proceedings Article
In: AAAI Conference on Artificial Intelligence, San Francisco, CA, 2011.
@inproceedings{antos_influence_2011,
title = {The Influence of Emotion Expression on Perceptions of Trustworthiness in Negotiation},
author = {Dimitrios Antos and Celso M. Melo and Jonathan Gratch and Barbara Grosz},
url = {http://ict.usc.edu/pubs/The%20influence%20of%20emotion%20expression%20on%20perceptions%20of%20trustworthiness%20in%20negotiation.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {AAAI Conference on Artificial Intelligence},
address = {San Francisco, CA},
abstract = {When interacting with computer agents, people make inferences about various characteristics of these agents, such as their reliability and trustworthiness. These perceptions are significant, as they influence people’s behavior towards the agents, and may foster or inhibit repeated interactions between them. In this paper we investigate whether computer agents can use the expression of emotion to influence human perceptions of trustworthiness. In particular, we study human-computer interactions within the context of a negotiation game, in which players make alternating offers to decide on how to divide a set of resources. A series of negotiation games between a human and several agents is then followed by a “trust game.” In this game people have to choose one among several agents to interact with, as well as how much of their resources they will trust to it. Our results indicate that, among those agents that displayed emotion, those whose expression was in accord with their actions (strategy) during the negotiation game were generally preferred as partners in the trust game over those whose emotion expressions and actions did not mesh. Moreover, we observed that when emotion does not carry useful new information, it fails to strongly influence human decision-making behavior in a negotiation setting.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Traum, David
Learning Culture-Specific Dialogue Models from Non Culture-Specific Data Proceedings Article
In: HCI International 2011, the 14th International Conference on Human-Computer Interaction, Orlando, FL, 2011.
@inproceedings{georgila_learning_2011,
title = {Learning Culture-Specific Dialogue Models from Non Culture-Specific Data},
author = {Kallirroi Georgila and David Traum},
url = {http://ict.usc.edu/pubs/Learning%20Culture-Specific%20Dialogue%20Models%20from%20Non%20Culture-Specific%20Data.pdf},
year = {2011},
date = {2011-07-01},
booktitle = {HCI International 2011, the 14th International Conference on Human-Computer Interaction},
address = {Orlando, FL},
abstract = {We build culture-specific dialogue policies of virtual humans for negotiation and in particular for argumentation and persuasion. In order to do that we use a corpus of non-culture specific dialogues and we build simulated users (SUs), i.e. models that simulate the behavior of real users. Then using these SUs and Reinforcement Learning (RL) we learn negotiation dialogue policies. Furthermore, we use research findings about specific cultures in order to tweak both the SUs and the reward functions used in RL towards a particular culture. We evaluate the learned policies in a simulation setting. Our results are consistent with our SU manipulations and RL reward functions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Herrera, David; Novick, David; Jan, Dusan; Traum, David
Dialog Behaviors across Culture and Group Size Book Section
In: Universal Access in HCI, Part II, 2011.
@incollection{herrera_dialog_2011,
title = {Dialog Behaviors across Culture and Group Size},
author = {David Herrera and David Novick and Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dialog%20Behaviors%20across%20Culture%20and%20Group%20Size.pdf},
year = {2011},
date = {2011-07-01},
booktitle = {Universal Access in HCI, Part II},
abstract = {This study analyzes joint interaction behaviors of two-person and four-person standing conversations from three different cultures, American, Arab, and Mexican. To determine whether people use joint interaction behaviors differently in multiparty versus dyadic conversation, and how differences in culture affect this relationship, we examine differences in proxemics, speaker and listener gaze behaviors, and overlap and pause at turn transitions. Our analysis suggests that proxemics, gaze, and mutual gaze to coordinate turns change with group size and with culture. However, these changes do not always agree with predictions from the research literature. These unanticipated outcomes demonstrate the importance of collecting and analyzing joint interaction behaviors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Melo, Celso M.; Gratch, Jonathan; Carnevale, Peter
Reverse Appraisal: Inferring from Emotion Displays who is the Cooperator and the Competitor in a Social Dilemma Proceedings Article
In: The 33rd Annual Meeting of the Cognitive Science Society (CogSci) 2011, Boston, MA, 2011.
@inproceedings{de_melo_reverse_2011,
title = {Reverse Appraisal: Inferring from Emotion Displays who is the Cooperator and the Competitor in a Social Dilemma},
author = {Celso M. Melo and Jonathan Gratch and Peter Carnevale},
url = {http://ict.usc.edu/pubs/Reverse%20Appraisal-%20Inferring%20from%20Emotion%20Displays%20who%20is%20the%20Cooperator%20and%20the%20Competitor%20in%20a%20Social%20Dilemma.pdf},
year = {2011},
date = {2011-07-01},
booktitle = {The 33rd Annual Meeting of the Cognitive Science Society (CogSci) 2011},
address = {Boston, MA},
abstract = {This paper explores whether and how facial displays of emotion can impact emergence of cooperation in a social dilemma. Three experiments are described where participants play the iterated prisoner's dilemma with (computer) players that display emotion. Experiment 1 compares a cooperative player, whose displays reflect a goal of mutual cooperation, with a control player that shows no emotion. Experiment 2 compares a competitive player, whose displays reflect a goal of getting more points than the participant, and the control player. Experiment 3 compares the cooperative and competitive players. Results show that people: cooperate more with the cooperative than the control player (Experiment 1); do not cooperate differently with the competitive and control players (Experiment 2); and, cooperate more with the cooperative than the competitive player, when they play the latter first (Experiment 3). In line with a social functions view of emotion, we argue people infer, from emotion displays, the other player's propensity to cooperate by reversing the emotion appraisal process. Post-game surveys show that people interpret the emotion displays according to appraisal variables (desirability, responsibility and controllability) in ways that are consistent with predictions from appraisal theories of emotion.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Traum, David
NPCEditor: Creating Virtual Human Dialogue Using Information Retrieval Techniques Journal Article
In: AI Magazine, vol. 32, no. 2, pp. 42–56, 2011.
@article{leuski_npceditor_2011-1,
title = {NPCEditor: Creating Virtual Human Dialogue Using Information Retrieval Techniques},
author = {Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/NPCEditor-%20Creating%20Virtual%20Human%20Dialogue%20Using%20Information%20Retrieval%20Techniques.pdf},
year = {2011},
date = {2011-07-01},
journal = {AI Magazine},
volume = {32},
number = {2},
pages = {42–56},
abstract = {NPCEditor is a system for building a natural language processing component for virtual humans capable of engaging a user in spoken dialog on a limited domain. It uses statistical language classification technology for mapping from a user's text input to system responses. NPCEditor provides a user-friendly editor for creating effective virtual humans quickly. It has been deployed as a part of various virtual human systems in several applications.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Artstein, Ron; Rushforth, Michael; Gandhe, Sudeep; Traum, David
Limits of simple dialogue acts for tactical questioning dialogues Proceedings Article
In: 7th IJCAI Workshop on Knowledge and Reasoning in Practical Dialogue Systems, Barcelona, Spain, 2011.
@inproceedings{artstein_limits_2011,
title = {Limits of simple dialogue acts for tactical questioning dialogues},
author = {Ron Artstein and Michael Rushforth and Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/Limits%20of%20simple%20dialogue%20acts%20for%20tactical%20questioning%20dialogues.pdf},
year = {2011},
date = {2011-07-01},
booktitle = {7th IJCAI Workshop on Knowledge and Reasoning in Practical Dialogue Systems},
address = {Barcelona, Spain},
abstract = {A set of dialogue acts, generated automatically by applying a dialogue act scheme to a domain representation designed for easy scenario authoring, covers approximately 72%–76% of user utterances spoken in live interaction with a tactical questioning simulation trainer. The domain is represented as facts of the form ⟨object, attribute, value⟩ and conversational actions of the form ⟨character, action⟩. User utterances from the corpus that fall outside the scope of the scheme include questions about temporal relations, relations between facts and relations between objects, questions about reason and evidence, assertions by the user, conditional offers, attempts to set the topic of conversation, and compound utterances. These utterance types constitute the limits of the simple dialogue act scheme.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Gratch, Jonathan; Huang, Lixing; Tao, Jianhua
Does culture affect the perception of emotion in virtual faces? Proceedings Article
In: International Symposium on Applied Perception in Graphics and Visualization, pp. 165, Los Angeles, CA, 2011.
@inproceedings{khooshabeh_does_2011-1,
title = {Does culture affect the perception of emotion in virtual faces?},
author = {Peter Khooshabeh and Jonathan Gratch and Lixing Huang and Jianhua Tao},
url = {http://ict.usc.edu/pubs/Does%20culture%20affect%20the%20perception%20of%20emotion%20in%20virtual%20faces.pdf},
year = {2011},
date = {2011-07-01},
booktitle = {International Symposium on Applied Perception in Graphics and Visualization},
pages = {165},
address = {Los Angeles, CA},
abstract = {Previous research, which has used images of real human faces and mostly from the same facial expression database [Matsumoto and Ekman 1988], has shown that individuals perceive emotions universally across cultures. We conducted an experiment to determine whether culture affects the perception of emotions rendered on virtual faces. Specifically, we test the holistic perception hypothesis that individuals from collectivist cultures, such as East Asians, visually sample information from central regions of the face (near the top of the nose by the eyes), as opposed to sampling from specific features of the face. If the holistic perception hypothesis is true, then individuals will confuse emotional facial expressions that are different in terms of the shape of the mouth facial feature. Our stimuli were computer generated using a face graphical rendering tool, which affords a high level of experimental control for perception researchers.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Dehghani, Morteza; Gratch, Jonathan; Sachdeva, Sonya; Sagae, Kenji
Analyzing Conservative and Liberal Blogs Related to the Construction of the 'Ground Zero Mosque' Proceedings Article
In: Annual Conference of the Cognitive Science Society, Boston, MA, 2011.
@inproceedings{dehghani_analyzing_2011,
title = {Analyzing Conservative and Liberal Blogs Related to the Construction of the 'Ground Zero Mosque'},
author = {Morteza Dehghani and Jonathan Gratch and Sonya Sachdeva and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Analyzing%20Conservative%20and%20Liberal%20Blogs%20Related%20to%20the%20Construction%20of%20the%20Ground%20Zero%20Mosque.pdf},
year = {2011},
date = {2011-07-01},
booktitle = {Annual Conference of the Cognitive Science Society},
address = {Boston, MA},
abstract = {The issue of the 'Ground Zero Mosque' has been one of the most controversial political issues in US politics in the last several years. Using two different statistical text-analysis techniques, we analyze conservative and liberal blog posts, related to the construction of this Muslim community center and the debates surrounding the issue. In the first experiment, we use a machine learning technique to automatically classify the blogs according to which group wrote them. We also examine the distinctive features that make these blogs liberal or conservative. In the second experiment, by examining posts in consecutive time blocks, we show that there was a significant increase over time in affective processing, and in anger, especially for conservatives. Overall, our results show that there are significant differences in the use of various linguistic features between liberals and conservatives, highlighting the differences between the ideologies and the moral frameworks of the two groups.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Artstein, Ron; Nazarian, Angela; Rushforth, Michael; Traum, David; Sycara, Katia
An Annotation Scheme for Cross-Cultural Argumentation and Persuasion Dialogues Proceedings Article
In: 12th SIGdial Workshop on Discourse and Dialogue, Portland, OR, 2011.
@inproceedings{georgila_annotation_2011,
title = {An Annotation Scheme for Cross-Cultural Argumentation and Persuasion Dialogues},
author = {Kallirroi Georgila and Ron Artstein and Angela Nazarian and Michael Rushforth and David Traum and Katia Sycara},
url = {http://www.ict.usc.edu//pubs/W11-2030.pdf},
year = {2011},
date = {2011-06-01},
booktitle = {12th SIGdial Workshop on Discourse and Dialogue},
address = {Portland, OR},
abstract = {We present a novel annotation scheme for cross-cultural argumentation and persuasion dialogues. This scheme is an adaptation of existing coding schemes on negotiation, following a review of literature on cross-cultural differences in negotiation styles. The scheme has been refined through application to coding both two-party and multi-party negotiation dialogues in three different domains, and is general enough to be applicable to different domains with few if any extensions. Dialogues annotated with the scheme have been used to successfully learn culture-specific dialogue policies for argumentation and persuasion.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Taylor, Alysa; Gerten, Jillian; Traum, David
Rapid Development of Advanced Question-Answering Characters by Non-experts Proceedings Article
In: 12th SIGdial Workshop on Discourse and Dialogue, Portland, OR, 2011.
@inproceedings{gandhe_rapid_2011,
title = {Rapid Development of Advanced Question-Answering Characters by Non-experts},
author = {Sudeep Gandhe and Alysa Taylor and Jillian Gerten and David Traum},
url = {http://ict.usc.edu/pubs/Rapid%20Development%20of%20Advanced%20Question-Answering%20Characters%20by%20Non-experts.pdf},
year = {2011},
date = {2011-06-01},
booktitle = {12th SIGdial Workshop on Discourse and Dialogue},
address = {Portland, OR},
abstract = {We demonstrate a dialogue system and the accompanying authoring tools that are designed to allow authors with little or no experience in building dialogue systems to rapidly build advanced question-answering characters. To date seven such virtual characters have been built by non-experts using this architecture and tools. Here we demonstrate one such character, PFC Sean Avery, which was developed by a non-expert in 3 months.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Carnevale, Peter; Kim, Yoo Kyoung; Melo, Celso M.; Dehghani, Morteza; Gratch, Jonathan
These Are Ours: The Effects of Ownership and Groups on Property Negotiation Proceedings Article
In: International Association of Conflict Management, Istanbul, 2011.
@inproceedings{carnevale_these_2011,
title = {These Are Ours: The Effects of Ownership and Groups on Property Negotiation},
author = {Peter Carnevale and Yoo Kyoung Kim and Celso M. Melo and Morteza Dehghani and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/These%20Are%20Ours.pdf},
year = {2011},
date = {2011-06-01},
booktitle = {International Association of Conflict Management},
address = {Istanbul},
abstract = {Ownership tends to affect negotiation by increasing the value that the negotiator places on the objects being negotiated. In this study, we invented a new computer-controlled negotiation task that presents negotiators pictures of objects on a screen and the negotiators grab the objects, or give them to an opponent, using a mouse. We experimentally varied ownership, telling negotiators in one case that they owned the objects (but needed the other's agreement on the distribution of the objects), or the other owned the objects (but their agreement was needed for distribution), or neither party owned the objects (and both had to agree on the distribution). We also varied whether negotiations were conducted by 3-person groups, or by individuals, and we varied the opponent's behavior in the negotiation (the other consistently demanded almost all the objects, hardly demanded any, or was totally responsive with a Tit-for-Tat strategy on the objects). We also varied the value of the objects, thus giving the task an integrative structure. One result was that groups were more likely than individuals to match the opponent's competitiveness, but only when ownership of the objects was undefined. Ownership, either self, or other, attenuated differences between groups and individuals, an effect not observable in studies that use abstract negotiation tasks or prisoner-dilemma–type games.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, William Yang; Artstein, Ron; Leuski, Anton; Traum, David
Improving Spoken Dialogue Understanding Using Phonetic Mixture Models Proceedings Article
In: The Twenty-Fourth International Florida Artificial Intelligence Research Society Conference, Palm Beach, FL, 2011.
@inproceedings{wang_improving_2011,
title = {Improving Spoken Dialogue Understanding Using Phonetic Mixture Models},
author = {William Yang Wang and Ron Artstein and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Improving%20Spoken%20Dialogue%20Understanding%20Using%20Phonetic%20Mixture%20Models.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {The Twenty-Fourth International Florida Artificial Intelligence Research Society Conference},
address = {Palm Beach, FL},
abstract = {Augmenting word tokens with a phonetic representation, derived from a dictionary, improves the performance of a Natural Language Understanding component that interprets speech recognizer output: we observed a 5% to 7% reduction in errors across a wide range of response return rates. The best performance comes from mixture models incorporating both word and phone features. Since the phonetic representation is derived from a dictionary, the method can be applied easily without the need for integration with a specific speech recognizer. The method has similarities with autonomous (or bottom-up) psychological models of lexical access, where contextual information is not integrated at the stage of auditory perception but rather later.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Lixing; Morency, Louis-Philippe; Gratch, Jonathan
A Multimodal End-of-Turn Prediction Model: Learning from Parasocial Consensus Sampling Proceedings Article
In: 10th International Conference on Autonomous Agents and Multiagent System, Taipei, Taiwan, 2011.
@inproceedings{huang_multimodal_2011,
title = {A Multimodal End-of-Turn Prediction Model: Learning from Parasocial Consensus Sampling},
author = {Lixing Huang and Louis-Philippe Morency and Jonathan Gratch},
url = {http://www.ict.usc.edu//pubs/A Multimodal End-of-Turn Prediction Model- Learning from Parasocial Consensus Sampling.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {10th International Conference on Autonomous Agents and Multiagent System},
address = {Taipei, Taiwan},
abstract = {Virtual humans, with realistic behaviors and increasingly human-like social skills, evoke in users a range of social behaviors normally only seen in human face-to-face interactions. One of the key challenges in creating such virtual humans is giving them human-like conversational skills. Traditional conversational virtual humans usually make turn-taking decisions depending on explicit cues, such as "press-to-talk buttons", from the human users. In contrast, people decide when to take turns by observing their conversational partner's behavior. In this paper, we present a multimodal end-of-turn prediction model. Instead of recording face-to-face conversations, we collect the turn-taking data using Parasocial Consensus Sampling (PCS) framework, where participants are guided to interact with media representation of people parasocially. Then, we analyze the relationship between verbal and nonverbal features and turn-taking behavior using the consensus data and show how these features influence the time people use to take turns. Finally, we present a probabilistic multimodal end-of-turn prediction model learned from the consensus data, which enables virtual humans to make real-time turn-taking predictions. The evaluation results show that our model achieves a high accuracy and takes human-like pauses, in terms of length, before taking its turns. Our work demonstrates the validity of Parasocial Consensus Sampling and generalizes this framework to model turn-taking behavior.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Carnevale, Peter; Gratch, Jonathan
The Effect of Expression of Anger and Happiness in Computer Agents on Negotiations with Humans Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Taipei, Taiwan, 2011.
@inproceedings{de_melo_effect_2011,
title = {The Effect of Expression of Anger and Happiness in Computer Agents on Negotiations with Humans},
author = {Celso M. Melo and Peter Carnevale and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Expression%20of%20Anger%20and%20Happiness%20in%20Computer%20Agents%20on%20Negotiations%20with%20Humans.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Taipei, Taiwan},
abstract = {There is now considerable evidence in social psychology, economics, and related disciplines that emotion plays an important role in negotiation. For example, humans make greater concessions in negotiation to an opposing human who expresses anger, and they make fewer concessions to an opponent who expresses happiness, compared to a no-emotion-expression control. However, in AI, despite the wide interest in negotiation as a means to resolve differences between agents and humans, emotion has been largely ignored. This paper explores whether expression of anger or happiness by computer agents, in a multi-issue negotiation task, can produce effects that resemble effects seen in human-human negotiation. The paper presents an experiment where participants play with agents that express emotions (anger vs. happiness vs. control) through different modalities (text vs. facial displays). An important distinction in our experiment is that participants are aware that they negotiate with computer agents. The data indicate that the emotion effects observed in past work with humans also occur in agent-human negotiation, and occur independently of modality of expression. The implications of these results are discussed for the fields of automated negotiation, intelligent virtual agents and artificial intelligence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Traum, David
NPCEditor: A Tool for Building Question-Answering Characters Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Valletta, Malta, 2011.
@inproceedings{leuski_npceditor_2011,
title = {NPCEditor: A Tool for Building Question-Answering Characters},
author = {Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/NPCEditor-%20A%20Tool%20for%20Building%20Question-Answering%20Characters.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Valletta, Malta},
abstract = {NPCEditor is a system for building and deploying virtual characters capable of engaging a user in spoken dialog on a limited domain. The dialogue may take any form as long as the character responses can be specified a priori. For example, NPCEditor has been used for constructing question answering characters where a user asks questions and the character responds, but other scenarios are possible. At the core of the system is a state of the art statistical language classification technology for mapping from user’s text input to system responses. NPCEditor combines the classifier with a database that stores the character information and relevant language data, a server that allows the character designer to deploy the completed characters, and a user-friendly editor that helps the designer to accomplish both character design and deployment tasks. In the paper we define the overall system architecture, describe individual NPCEditor components, and guide the reader through the steps of building a virtual character.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Grace; Tosch, Emma; Artstein, Ron; Leuski, Anton; Traum, David
Evaluating Conversational Characters Created through Question Generation Proceedings Article
In: 24th Florida Artificial Intelligence Research Society Conference (FLAIRS-24), Palm Beach, FL, 2011.
@inproceedings{chen_evaluating_2011,
title = {Evaluating Conversational Characters Created through Question Generation},
author = {Grace Chen and Emma Tosch and Ron Artstein and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Evaluating%20Conversational%20Characters%20Created%20through%20Question%20Generation.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {24th Florida Artificial Intelligence Research Society Conference (FLAIRS-24)},
address = {Palm Beach, FL},
abstract = {Question generation tools can be used to extract a question-answer database from text articles. We investigate how suitable this technique is for giving domain-specific knowledge to conversational characters. We tested these characters by collecting questions and answers from naive participants, running the questions through the character, and comparing the system responses to the participant answers. Characters gave a full or partial answer to 53% of the user questions which had an answer available in the source text, and 43% of all questions asked. Performance was better for questions asked after the user had read the source text, and also varied by question type: the best results were answers to who questions, while answers to yes/no questions were among the poorer performers. The results show that question generation is a promising method for creating a question-answering conversational character from an existing text.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; McCall, Cade; Gratch, Jonathan; Blascovich, James J.; Gandhe, Sudeep
Does it matter if a computer jokes? Proceedings Article
In: ACM SIGCHI, pp. 77–86, Vancouver, Canada, 2011.
@inproceedings{khooshabeh_does_2011,
title = {Does it matter if a computer jokes?},
author = {Peter Khooshabeh and Cade McCall and Jonathan Gratch and James J. Blascovich and Sudeep Gandhe},
url = {http://ict.usc.edu/pubs/Does%20it%20matter%20if%20a%20computer%20jokes.pdf},
year = {2011},
date = {2011-05-01},
booktitle = {ACM SIGCHI},
pages = {77–86},
address = {Vancouver, Canada},
abstract = {The goal here was to determine whether computer interfaces are capable of social influence via humor. Users interacted with a natural language capable virtual agent that told persuasive information, and they were given the option to use information from the dialogue in order to complete a problem-solving task. Individuals interacting with an ostensibly humorous virtual agent were influenced by it such that those who judged the agent unfunny were less likely to be persuaded and departed from the agent's suggestions. We discuss the implications of these results for HCI involving natural language systems and virtual agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jung, Yvonne; Kuijper, Arjan; Kipp, Michael; Miksatko, Jan; Gratch, Jonathan; Thalmann, Daniel
Believable Virtual Characters in Human-Computer Dialogs Proceedings Article
In: EUROGRAPHICS, Llandudno, United Kingdom, 2011.
@inproceedings{jung_believable_2011,
title = {Believable Virtual Characters in Human-Computer Dialogs},
author = {Yvonne Jung and Arjan Kuijper and Michael Kipp and Jan Miksatko and Jonathan Gratch and Daniel Thalmann},
url = {http://ict.usc.edu/pubs/Believable%20Virtual%20Characters%20in%20Human-Computer%20Dialogs.pdf},
year = {2011},
date = {2011-04-01},
booktitle = {EUROGRAPHICS},
address = {Llandudno, United Kingdom},
abstract = {For many application areas, where a task is most naturally represented by talking or where standard input devices are difficult to use or not available at all, virtual characters can be well suited as an intuitive man-machine interface due to their inherent ability to simulate verbal as well as nonverbal communicative behavior. This type of interface is made possible with the help of multimodal dialog systems, which extend common speech dialog systems with additional modalities just like in human-human interaction. Multimodal dialog systems consist of at least an auditory and a graphical component, and communication is based on speech and nonverbal communication alike. However, employing virtual characters as personal and believable dialog partners in multimodal dialogs entails several challenges, because it requires not only reliable and consistent motion and dialog behavior but also appropriate nonverbal communication and affective components. Besides modeling the "mind" and creating intelligent communication behavior on the encoding side, which is an active field of research in artificial intelligence, the visual representation of a character, including its perceivable behavior such as facial expressions and gestures, belongs from a decoding perspective to the domain of computer graphics and likewise raises many open issues concerning natural communication. Therefore, in this report we give a comprehensive overview of how to go from communication models to actual animation and rendering.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Ghosh, Abhijeet; Debevec, Paul; Morency, Louis-Philippe
Effect of Illumination on Automatic Expression Recognition: A Novel 3D Relightable Facial Database Proceedings Article
In: Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition, Santa Barbara, CA, 2011.
@inproceedings{stratou_effect_2011,
title = {Effect of Illumination on Automatic Expression Recognition: A Novel 3D Relightable Facial Database},
author = {Giota Stratou and Abhijeet Ghosh and Paul Debevec and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Effect%20of%20Illumination%20on%20Automatic%20Expression%20Recognition-%20A%20Novel%203D%20Relightable%20Facial%20Database.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition},
address = {Santa Barbara, CA},
abstract = {One of the main challenges in facial expression recognition is illumination invariance. Our long-term goal is to develop a system for automatic facial expression recognition that is robust to light variations. In this paper, we introduce a novel 3D Relightable Facial Expression (ICT-3DRFE) database that enables experimentation in the fields of both computer graphics and computer vision. The database contains 3D models for 23 subjects and 15 expressions, as well as photometric information that allows for photorealistic rendering. It is also annotated with facial action units, following FACS standards. Using the ICT-3DRFE database we create an image set of different expressions/illuminations to study the effect of illumination on automatic expression recognition. We compared the output scores from automatic recognition with expert FACS annotations and found that they agree when the illumination is uniform. Our results show that the output distribution of the automatic recognition can change significantly with light variations and sometimes causes the discrimination of two different expressions to be diminished. We propose a ratio-based light transfer method, to factor out unwanted illuminations from given images and show that it reduces the effect of illumination on expression recognition.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Campbell, Julia; Core, Mark; Artstein, Ron; Armstrong, Lindsay; Hartholt, Arno; Wilson, Cyrus A.; Georgila, Kallirroi; Morbini, Fabrizio; Haynes, Kip; Gomboc, Dave; Birch, Mike; Bobrow, Jonathan; Lane, H. Chad; Gerten, Jillian; Leuski, Anton; Traum, David; Trimmer, Matthew; DiNinni, Rich; Bosack, Matthew; Jones, Timothy; Clark, Richard E.; Yates, Kenneth A.
Developing INOTS to Support Interpersonal Skills Practice Proceedings Article
In: IEEE Aerospace Conference, 2011.
@inproceedings{campbell_developing_2011,
title = {Developing INOTS to Support Interpersonal Skills Practice},
author = {Julia Campbell and Mark Core and Ron Artstein and Lindsay Armstrong and Arno Hartholt and Cyrus A. Wilson and Kallirroi Georgila and Fabrizio Morbini and Kip Haynes and Dave Gomboc and Mike Birch and Jonathan Bobrow and H. Chad Lane and Jillian Gerten and Anton Leuski and David Traum and Matthew Trimmer and Rich DiNinni and Matthew Bosack and Timothy Jones and Richard E. Clark and Kenneth A. Yates},
url = {http://ict.usc.edu/pubs/Developing%20INOTS%20to%20Support%20Interpersonal%20Skills%20Practice.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Aerospace Conference},
abstract = {The Immersive Naval Officer Training System (INOTS) is a blended learning environment that merges traditional classroom instruction with a mixed reality training setting. INOTS supports the instruction, practice and assessment of interpersonal communication skills. The goal of INOTS is to provide a consistent training experience to supplement interpersonal skills instruction for Naval officer candidates without sacrificing trainee throughput and instructor control. We developed an instructional design from cognitive task analysis interviews with experts to serve as a framework for system development. We also leveraged commercial student response technology and research technologies including natural language recognition, virtual humans, realistic graphics, intelligent tutoring and automated instructor support tools. In this paper, we describe our methodologies for developing a blended learning environment, and our challenges adding mixed reality and virtual human technologies to a traditional classroom to support interpersonal skills training.},
keywords = {Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul
Rethinking Cognitive Architecture via Graphical Models Journal Article
In: Cognitive Systems Research, 2011.
@article{rosenbloom_rethinking_2011,
title = {Rethinking Cognitive Architecture via Graphical Models},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Rethinking%20Cognitive%20Architecture%20via%20Graphical%20Models.pdf},
year = {2011},
date = {2011-03-01},
journal = {Cognitive Systems Research},
abstract = {Cognitive architectures need to resolve the diversity dilemma – i.e., to blend diversity and simplicity – in order to couple functionality and efficiency with integrability, extensibility and maintainability. Building diverse architectures upon a uniform implementation level of graphical models is an intriguing approach because of the homogeneous manner in which such models produce state-of-the-art algorithms spanning symbol, probability and signal processing. To explore this approach a hybrid (discrete and continuous) mixed (Boolean and Bayesian) version of the Soar architecture is being implemented via graphical models. Initial steps reported here begin to show the potential of such an approach for cognitive architecture.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Courtney, Chris; Arizmendi, Brian; Dawson, Michael E.
Virtual Reality Stroop Task for Neurocognitive Assessment Journal Article
In: Studies in Health Technology and Informatics, vol. 143, pp. 433–439, 2011.
@article{parsons_virtual_2011,
title = {Virtual Reality Stroop Task for Neurocognitive Assessment},
author = {Thomas D. Parsons and Chris Courtney and Brian Arizmendi and Michael E. Dawson},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Stroop%20Task%20for%20Neurocognitive%20Assessment.pdf},
doi = {10.3233/978-1-60750-706-2-433},
year = {2011},
date = {2011-01-01},
journal = {Studies in Health Technology and Informatics},
volume = {143},
pages = {433–439},
abstract = {Given the prevalence of traumatic brain injury (TBI), and the fact that many mild TBIs have no external marker of injury, there is a pressing need for innovative assessment technology. The demand for assessment that goes beyond traditional paper-and-pencil testing has resulted in the use of automated cognitive testing for increased precision and efficiency; and the use of virtual environment technology for enhanced ecological validity and increased function-based assessment. To address these issues, a Virtual Reality Stroop Task (VRST) that involves the subject being immersed in a virtual Humvee as Stroop stimuli appear on the windshield was developed. This study is an initial validation of the VRST as an assessment of neurocognitive functioning. When compared to the paper-and-pencil, as well as Automated Neuropsychological Assessment Metrics, versions of the Stroop, the VRST appears to have enhanced capacity for providing an indication of a participant's reaction time and ability to inhibit a prepotent response while immersed in a military-relevant simulation that presents psychophysiologically arousing high- and low-threat stimuli.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Gratch, Jonathan
People Like Virtual Counselors That Highly-Disclose About Themselves Proceedings Article
In: The Annual Review of Cybertherapy and Telemedicine, Gatineau, Canada, 2011.
@inproceedings{kang_people_2011,
title = {People Like Virtual Counselors That Highly-Disclose About Themselves},
author = {Sin-Hwa Kang and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/People%20Like%20Virtual%20Counselors%20That%20Highly-Disclose%20About%20Themselves.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {The Annual Review of Cybertherapy and Telemedicine},
address = {Gatineau, Canada},
abstract = {In this paper, we describe our findings from research designed to explore the effect of self-disclosure between virtual human counselors (interviewers) and human users (interviewees) on users' social responses in counseling sessions. To investigate this subject, we designed an experiment involving three conditions of self-disclosure: high-disclosure, low-disclosure, and non-disclosure. We measured users' sense of co-presence and social attraction to virtual counselors. The results demonstrated that users reported more co-presence and social attraction to virtual humans who disclosed highly intimate information about themselves than when compared to other virtual humans who disclosed less intimate or no information about themselves. In addition, a further analysis of users' verbal self-disclosure showed that users revealed a medium level of personal information more often when interacting with virtual humans that highly disclosed about themselves, than when interacting with virtual humans disclosing less intimate or no information about themselves.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Bejan, Cosmin Adrian; Sagae, Kenji
Commonsense Causal Reasoning Using Millions of Personal Stories Proceedings Article
In: 25th Conference on Artificial Intelligence (AAAI-11), San Francisco, CA, 2011.
@inproceedings{gordon_commonsense_2011,
title = {Commonsense Causal Reasoning Using Millions of Personal Stories},
author = {Andrew S. Gordon and Cosmin Adrian Bejan and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Commonsense%20Causal%20Reasoning%20Using%20Millions%20of%20Personal%20Stories.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {25th Conference on Artificial Intelligence (AAAI-11)},
address = {San Francisco, CA},
abstract = {The personal stories that people write in their Internet weblogs include a substantial amount of information about the causal relationships between everyday events. In this paper we describe our efforts to use millions of these stories for automated commonsense causal reasoning. Casting the commonsense causal reasoning problem as a Choice of Plausible Alternatives, we describe four experiments that compare various statistical and information retrieval approaches to exploit causal information in story corpora. The top performing system in these experiments uses a simple co-occurrence statistic between words in the causal antecedent and consequent, calculated as the Pointwise Mutual Information between words in a corpus of millions of personal stories.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
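The co-occurrence statistic named in this abstract is standard Pointwise Mutual Information; as a reference formulation (a textbook definition, not quoted from the paper), for a word a drawn from the causal antecedent and a word c from the consequent:

\mathrm{PMI}(a, c) = \log \frac{P(a, c)}{P(a)\,P(c)}

Here P(a, c) is the probability that the two words co-occur in a story and P(a), P(c) are their marginal probabilities over the story corpus; a candidate alternative can then be scored, for example, by aggregating PMI over its word pairs (the aggregation scheme here is an illustrative assumption).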
Rizzo, Albert; Lange, Belinda; Buckwalter, John Galen; Forbell, Eric; Kim, Julia; Sagae, Kenji; Williams, Josh; Rothbaum, Barbara O.; Difede, JoAnn; Reger, Greg; Parsons, Thomas D.; Kenny, Patrick G.
An Intelligent Virtual Human System for Providing Healthcare Information and Support Journal Article
In: Medicine Meets Virtual Reality, vol. 18, pp. 503–509, 2011.
@article{rizzo_intelligent_2011,
title = {An Intelligent Virtual Human System for Providing Healthcare Information and Support},
author = {Albert Rizzo and Belinda Lange and John Galen Buckwalter and Eric Forbell and Julia Kim and Kenji Sagae and Josh Williams and Barbara O. Rothbaum and JoAnn Difede and Greg Reger and Thomas D. Parsons and Patrick G. Kenny},
url = {http://ict.usc.edu/pubs/An%20Intelligent%20Virtual%20Human%20System%20for%20Providing%20Healthcare%20Information%20and%20Support.pdf},
doi = {10.3233/978-1-60750-706-2-503},
year = {2011},
date = {2011-01-01},
journal = {Medicine Meets Virtual Reality},
volume = {18},
pages = {503–509},
abstract = {Over the last 15 years, a virtual revolution has taken place in the use of Virtual Reality simulation technology for clinical purposes. Shifts in the social and scientific landscape have now set the stage for the next major movement in Clinical Virtual Reality with the "birth" of intelligent virtual humans. Seminal research and development has appeared in the creation of highly interactive, artificially intelligent and natural language capable virtual human agents that can engage real human users in a credible fashion. No longer at the level of a prop to add context or minimal faux interaction in a virtual world, virtual humans can be designed to perceive and act in a 3D virtual world, engage in spoken dialogues with real users and can be capable of exhibiting human-like emotional reactions. This paper will present an overview of the SimCoach project that aims to develop virtual human support agents to serve as online guides for promoting access to psychological healthcare information and for assisting military personnel and family members in breaking down barriers to initiating care. The SimCoach experience is being designed to attract and engage military Service Members, Veterans and their significant others who might not otherwise seek help with a live healthcare provider. It is expected that this experience will motivate users to take the first step – to empower themselves to seek advice and information regarding their healthcare and general personal welfare and encourage them to take the next step towards seeking more formal resources if needed.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Reger, Greg; Parsons, Thomas D.; Gahm, Greg; Rizzo, Albert
Virtual Reality Assessment of Cognitive Functions: A Promising Tool to Improve Ecological Validity Journal Article
In: Brain Injury Professional, vol. 7, pp. 24–26, 2011.
@article{reger_virtual_2011,
title = {Virtual Reality Assessment of Cognitive Functions: A Promising Tool to Improve Ecological Validity},
author = {Greg Reger and Thomas D. Parsons and Greg Gahm and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Assessment%20of%20Cognitive%20Functions.pdf},
year = {2011},
date = {2011-01-01},
journal = {Brain Injury Professional},
volume = {7},
pages = {24–26},
abstract = {Military service in Iraq and Afghanistan comes with the risk of exposure to improvised explosive devices (IEDs), vehicle borne IEDs, rockets, mortars and other blasts. Vehicle roll-over accidents, small arms fire and other non-battle injuries also occur. Accordingly, service members deployed in support of Operation Iraqi Freedom and Operation Enduring Freedom are at increased risk of traumatic brain injuries (TBIs). Since 2000, over 169,000 service members have been diagnosed with a TBI (Department of Defense, 2010) and the RAND Corporation reported that nearly one in five service members who deployed to Iraq or Afghanistan reported a probable TBI (Tanielian and Jaycox, 2008). Although mild TBIs, or concussions, typically result in full recovery following a brief period of time, more serious injuries can result in new symptoms or changes in functioning and behavior. Some of these changes occur in cognitive domains such as attention, memory, executive functions, language, spatial abilities and psychomotor skills. These changes are usually documented with paper and pencil tests that compare the service member's cognitive performance to that of their peers. For the comparison to be valid, these tests must be administered in a similar manner to that used to determine the norms – typically quiet, well-controlled environments that minimize distractions and maximize best effort. Cognitive tests can serve a number of clinical purposes including accurate diagnosis, informing the level of care a patient requires, treatment planning and treatment evaluation (Lezak et al., 2004). Repeated assessments can also characterize the nature of the injury and document any changes over time. Providers in both civilian and military contexts have increasingly been asked to use neuropsychological test performances to make recommendations about patients' everyday functioning (Lynch, 2008). In the civilian sector, these questions may relate to driving or activities of daily living, whereas clinicians working in the deployed environment or at military treatment facilities may use cognitive assessments to inform questions related to fitness for duty. For example, deployed commanders may have referral questions related to the safety of personnel to perform basic tactical skills. On the home front, military neuropsychologists may be consulted as part of a "fitness for duty" evaluation that is conducted when impairments significantly interfere with work performance. In addition, there is increasing interest in the assessment of the severity of functional impairment following TBI. The complexity and lethality of modern warfare place great demands on a service member's neurocognitive resources. At varying levels of threat, service members must be able to exercise control of cognitive functions. It may be challenging to interpret the results of traditional cognitive assessment tools to answer military specific questions. With tremendous individual variability in responses to stress, how well does performance during a well-controlled cognitive assessment predict performance during the stresses of war? It is not known, for example, how well a service member with low average mental efficiency or processing speed following a TBI will react to fire during a tactical convoy. Is this individual fit for combat duty? What kind of performance is required on cognitive tests for a service member to be judged fit to man an automatic weapon during a convoy?
Following a mild TBI, how do we assess the functional impairment of service members whose occupational environment has significant, unpredictable low and high intensity stress? Hence, for a measure to be relevant to an assessment of service member neurocognitive functioning, it should provide some indication of a service member's cognitive performance within high and low threat settings. Questions such as these relate to concerns about tests' ecological validity – the degree to which performance on cognitive tests accurately predicts future behavior in the real world. Although some tests have demonstrated evidence of ecological validity (Strauss et al., 2006), developments in the area of virtual reality may offer new opportunities to improve ecological validity and inform key questions related to the post-TBI assessment of service members.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tomai, Emmett; Thapa, Laxman; Gordon, Andrew S.; Kang, Sin-Hwa
Causality in Hundreds of Narratives of the Same Events Proceedings Article
In: The Fourth Workshop on Intelligent Narrative Technologies at the 2011 AI and Interactive Digital Entertainment Conference (AIIDE), Stanford, CA, 2011.
@inproceedings{tomai_causality_2011,
title = {Causality in Hundreds of Narratives of the Same Events},
author = {Emmett Tomai and Laxman Thapa and Andrew S. Gordon and Sin-Hwa Kang},
url = {http://ict.usc.edu/pubs/Causality%20in%20Hundreds%20of%20Narratives%20of%20the%20Same%20Events.PDF},
year = {2011},
date = {2011-01-01},
booktitle = {The Fourth Workshop on Intelligent Narrative Technologies at the 2011 AI and Interactive Digital Entertainment Conference (AIIDE)},
address = {Stanford, CA},
abstract = {Empirical research supporting computational models of narrative is often constrained by the lack of large-scale corpora with deep annotation. In this paper, we report on our annotation and analysis of a dataset of 283 individual narrations of the events in two short video clips. The utterances in the narrative transcripts were annotated to align with known events in the source videos, offering a unique opportunity to study the regularities and variations in the way that different people describe the exact same set of events. We identified the causal relationships between events in the two video clips, and investigated the role that causality plays in determining whether subjects will mention a particular story event and the likelihood that these events will be told in the order that they occurred in the original videos.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Parsons, Thomas D.; Lange, Belinda; Kenny, Patrick G.; Buckwalter, John Galen; Rothbaum, Barbara O.; Difede, JoAnn; Frazier, John; Newman, Brad
Virtual Reality Goes to War: A Brief Review of the Future of Military Behavioral Healthcare Journal Article
In: Journal of Clinical Psychology in Medical Settings, vol. 18, pp. 176–187, 2011.
@article{rizzo_virtual_2011-1,
title = {Virtual Reality Goes to War: A Brief Review of the Future of Military Behavioral Healthcare},
author = {Albert Rizzo and Thomas D. Parsons and Belinda Lange and Patrick G. Kenny and John Galen Buckwalter and Barbara O. Rothbaum and JoAnn Difede and John Frazier and Brad Newman},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Goes%20to%20War.pdf},
year = {2011},
date = {2011-01-01},
journal = {Journal of Clinical Psychology in Medical Settings},
volume = {18},
pages = {176–187},
abstract = {Numerous reports indicate that the incidence of posttraumatic stress disorder (PTSD) in returning OEF/OIF military personnel is creating a significant healthcare challenge. These findings have served to motivate research on how to better develop and disseminate evidence-based treatments for PTSD. Virtual Reality delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. This article details how virtual reality applications are being designed and implemented across various points in the military deployment cycle to prevent, identify and treat combat-related PTSD in OIF/OEF Service Members and Veterans. The summarized projects in these areas have been developed at the University of Southern California Institute for Creative Technologies, a U.S. Army University Affiliated Research Center, and this paper will detail efforts to use virtual reality to deliver exposure therapy, assess PTSD and cognitive function and provide stress resilience training prior to deployment.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.
Affect-sensitive Virtual Standardized Patient Interface System Book Section
In: Clinical Technologies: Concepts, Methodologies, Tools and Applications, vol. 3, 2011.
@incollection{parsons_affect-sensitive_2011,
title = {Affect-sensitive Virtual Standardized Patient Interface System},
author = {Thomas D. Parsons},
url = {http://ict.usc.edu/pubs/Affect-Sensitive%20Virtual%20Standardized%20Patient%20Interface%20System.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {Clinical Technologies: Concepts, Methodologies, Tools and Applications},
volume = {3},
abstract = {Virtual Standardized Patients (VSPs) are advanced conversational virtual human agents that have been applied to training of clinicians. These interactive agents portray standardized patient scenarios involving VSPs with clinical or physical conditions. VSPs are capable of verbal and nonverbal interaction (receptive and expressive communication) with a clinician in an effort to enhance differential diagnosis of psychiatric disorders and teach interpersonal skills. This chapter describes the design and development of both software to create social interaction modules on a VSP platform and individualized affective models for affect recognition. This author describes clinically relevant scenarios for affect elicitation and protocols for reliable affect recognition. Further, there is an elucidation of a VSP interface system that has the capacity to monitor the trainee's affective response using physiological signals. Research findings will be summarized from studies on (1) the usability and applicability of VSPs with training clinicians on various mental health disorders (e.g., adolescent male with conduct disorder; adolescent female who has recently been physically traumatized); and (2) preliminary use of the affect-sensitive system to systematically assess and manipulate aspects of VSPs to more fully develop cognitive and affective models of virtual humans with pathological characteristics.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gratch, Jonathan
The Sciences of the Artificial Emotions: Commentary on Aylett & Paiva Journal Article
In: Emotion Review, vol. 4, no. 3, pp. 266–268, 2011.
@article{gratch_sciences_2011,
title = {The Sciences of the Artificial Emotions: Commentary on Aylett & Paiva},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/The%20Sciences%20of%20the%20Artificial%20Emotions-%20Commentary%20on%20Aylett%20and%20Paiva.pdf},
year = {2011},
date = {2011-01-01},
journal = {Emotion Review},
volume = {4},
number = {3},
pages = {266–268},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2010
Rosenbloom, Paul
Computing and computation Journal Article
In: The Computer Journal, vol. 55, no. 7, pp. 820–824, 2010.
@article{rosenbloom_computing_2010,
title = {Computing and computation},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Computing%20and%20computation.pdf},
doi = {10.1093/comjnl/bxs070},
year = {2010},
date = {2010-12-01},
journal = {The Computer Journal},
volume = {55},
number = {7},
pages = {820–824},
abstract = {In this essay we claim that computing is the fourth great scientific domain, on par with the physical, life, and social sciences.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kenny, Patrick G.; Parsons, Thomas D.
Embodied Conversational Virtual Patients Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2010.
@inproceedings{kenny_embodied_2010,
title = {Embodied Conversational Virtual Patients},
author = {Patrick G. Kenny and Thomas D. Parsons},
url = {http://www.ict.usc.edu//pubs/Embodied%20Conversational%20Virtual%20Patients.pdf},
year = {2010},
date = {2010-12-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {Recent research has established the potential for computer generated virtual characters to act as virtual patients (VP) for the assessment and training of novice clinicians in interpersonal skills, interviewing, and diagnosis. These VPs are embodied interactive conversational agents who are designed to simulate a particular clinical presentation of a patient's illness with a high degree of consistency and realism. In this chapter we describe the architecture developed for virtual patients, and the application of the system to subject testing with virtual patients that exhibit a set of clinical conditions called Post Traumatic Stress Disorder (PTSD). The primary goal of these conversational agents was evaluative: can a VP generate responses that elicit user questions relevant for PTSD categorization? The results of the interactions of clinical students with the VP will be discussed. This chapter also highlights a set of design goals for increasing the visual, physical and cognitive realism when building VP systems including the design of the language, scenarios and artwork that is important when developing these characters. Finally, future research directions and challenges will be discussed for conversational virtual patients.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Traum, David; Artstein, Ron; Noren, Dan; Debevec, Paul; Bronnenkant, Kerry; Williams, Josh; Leuski, Anton; Narayanan, Shrikanth; Piepol, Diane; Lane, H. Chad; Morie, Jacquelyn; Aggarwal, Priti; Liewer, Matt; Chiang, Jen-Yuan; Gerten, Jillian; Chu, Selina; White, Kyle
Virtual Museum Guides Demonstration Proceedings Article
In: IEEE Workshop on Spoken Language Technology, Berkeley, CA, 2010.
@inproceedings{swartout_virtual_2010,
title = {Virtual Museum Guides Demonstration},
author = {William Swartout and David Traum and Ron Artstein and Dan Noren and Paul Debevec and Kerry Bronnenkant and Josh Williams and Anton Leuski and Shrikanth Narayanan and Diane Piepol and H. Chad Lane and Jacquelyn Morie and Priti Aggarwal and Matt Liewer and Jen-Yuan Chiang and Jillian Gerten and Selina Chu and Kyle White},
url = {http://ict.usc.edu/pubs/Virtual%20Museum%20Guides%20Demonstration.pdf},
year = {2010},
date = {2010-12-01},
booktitle = {IEEE Workshop on Spoken Language Technology},
address = {Berkeley, CA},
keywords = {Graphics, Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}