Publications
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1495–1503, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
@inproceedings{pynadath_clustering_2018,
title = {Clustering Behavior to Recognize Subjective Beliefs in Human-Agent Teams},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://dl.acm.org/citation.cfm?id=3237923},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {1495–1503},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Trust is critical to the success of human-agent teams, and a critical antecedent to trust is transparency. To best interact with human teammates, an agent must explain itself so that they understand its decision-making process. However, individual differences among human teammates require that the agent dynamically adjust its explanation strategy based on their unobservable subjective beliefs. The agent must therefore recognize its teammates' subjective beliefs relevant to trust-building (e.g., their understanding of the agent's capabilities and process). We leverage a nonparametric method to enable an agent to use its history of prior interactions as a means for recognizing and predicting a new teammate's subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable perceptions. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors' responses to infer the likelihood of possible beliefs, as in collaborative filtering. The results provide insights into the types of beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
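The nearest-neighbor method summarized above fits in a few lines of code. The following Python sketch is illustrative only, not the authors' implementation: it assumes each teammate's behavior is logged as a fixed-length sequence of advice-following decisions, finds the k most behaviorally similar prior teammates, and pools their surveyed beliefs in collaborative-filtering style. All names and toy data are hypothetical.

from collections import Counter

def hamming(a, b):
    # Distance between two equal-length behavior sequences.
    return sum(x != y for x, y in zip(a, b))

def infer_belief(new_seq, history, k=3):
    # history: list of (behavior_sequence, surveyed_belief) pairs.
    # Pool the surveyed beliefs of the k prior teammates whose observable
    # behavior most resembles the new teammate's, as in collaborative filtering.
    neighbors = sorted(history, key=lambda h: hamming(new_seq, h[0]))[:k]
    votes = Counter(belief for _, belief in neighbors)
    total = sum(votes.values())
    return {belief: n / total for belief, n in votes.items()}

# Hypothetical toy data: 1 = followed the agent's advice, 0 = ignored it.
history = [((1, 1, 0, 1), "trusts"), ((0, 0, 1, 0), "distrusts"),
           ((1, 1, 1, 1), "trusts"), ((0, 1, 0, 0), "distrusts")]
print(infer_belief((1, 0, 1, 1), history))  # approx. {'trusts': 0.67, 'distrusts': 0.33}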
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Marge, Matthew; Henry, Cassidy; Artstein, Ron; Traum, David; Voss, Clare R.
Consequences and Factors of Stylistic Differences in Human-Robot Dialogue Proceedings Article
In: Proceedings of the SIGDIAL 2018 Conference, pp. 110–118, Association for Computational Linguistics, Melbourne, Australia, 2018.
@inproceedings{lukin_consequences_2018,
title = {Consequences and Factors of Stylistic Differences in Human-Robot Dialogue},
author = {Stephanie M. Lukin and Kimberly A. Pollard and Claire Bonial and Matthew Marge and Cassidy Henry and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/papers/W/W18/W18-5012/},
doi = {10.18653/v1/W18-5012},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the SIGDIAL 2018 Conference},
pages = {110–118},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {This paper identifies stylistic differences in instruction-giving observed in a corpus of human-robot dialogue. Differences in verbosity and structure (i.e., single-intent vs. multi-intent instructions) arose naturally without restrictions or prior guidance on how users should speak with the robot. Different styles were found to produce different rates of miscommunication, and correlations were found between style differences and individual user variation, trust, and interaction experience with the robot. Understanding potential consequences and factors that influence style can inform design of dialogue systems that are robust to natural variation from human users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Bui, Trung; Chang, Walter; Georgila, Kallirroi
Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task Proceedings Article
In: Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pp. 284–295, Association for Computational Linguistics, Melbourne, Australia, 2018.
@inproceedings{manuvinakurike_conversational_2018,
title = {Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task},
author = {Ramesh Manuvinakurike and Trung Bui and Walter Chang and Kallirroi Georgila},
url = {https://aclanthology.info/papers/W18-5033/w18-5033},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue},
pages = {284–295},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {We present “conversational image editing”, a novel real-world application domain combining dialogue, visual information, and the use of computer vision. We discuss the importance of dialogue incrementality in this task, and build various models for incremental intent identification based on deep learning and traditional classification algorithms. We show how our model based on convolutional neural networks outperforms models based on random forests, long short-term memory networks, and conditional random fields. By training embeddings based on image-related dialogue corpora, we outperform pre-trained out-of-the-box embeddings for intention identification tasks. Our experiments also provide evidence that incremental intent processing may be more efficient for the user and could save time in accomplishing tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
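To make "incremental intent identification" concrete: the classifier re-scores the user's utterance after every incoming word, so the system can commit to an editing action before the sentence ends. The toy Python sketch below substitutes an invented keyword matcher for the paper's trained CNN classifier; the intents and keywords are hypothetical.

# Hypothetical intents and trigger keywords; a real system would run a
# trained classifier (e.g., a CNN over embeddings) on each prefix instead.
KEYWORDS = {"crop": "CROP", "bright": "ADJUST_BRIGHTNESS", "undo": "UNDO"}

def score(prefix):
    # Crude keyword evidence per intent over the words seen so far.
    return {intent: sum(kw in w for w in prefix)
            for kw, intent in KEYWORDS.items()}

def incremental_intent(words):
    # Emit a (possibly revised) intent hypothesis after every incoming word.
    for t in range(1, len(words) + 1):
        s = score(words[:t])
        best = max(s, key=s.get)
        yield words[:t], best if s[best] > 0 else "UNKNOWN"

for prefix, intent in incremental_intent("make it a bit brighter please".split()):
    print(" ".join(prefix), "->", intent)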
Karkada, Deepthi; Manuvinakurike, Ramesh; Georgila, Kallirroi
Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario Proceedings Article
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
@inproceedings{karkada_towards_2018,
title = {Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03950},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {We introduce a dataset containing human-authored descriptions of target locations in an “end-of-trip in a taxi ride” scenario. We describe our data collection method and a novel annotation scheme that supports understanding of such descriptions of target locations. Our dataset contains target location descriptions for both synthetic and real-world images as well as visual annotations (ground truth labels, dimensions of vehicles and objects, coordinates of the target location, distance and direction of the target location from vehicles and objects) that can be used in various visual and language tasks. We also perform a pilot experiment on how the corpus could be applied to visual reference resolution in this domain.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Bharadwaj, Sumanth; Georgila, Kallirroi
A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change Proceedings Article
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
@inproceedings{manuvinakurike_dialogue_2018,
title = {A Dialogue Annotation Scheme for Weight Management Chat using the Trans-Theoretical Model of Health Behavior Change},
author = {Ramesh Manuvinakurike and Sumanth Bharadwaj and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03948},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
publisher = {arxiv.org},
address = {Santa Fe, New Mexico},
abstract = {A dialogue annotation scheme for weight management chat using the trans-theoretical model of health behavior change},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Khooshabeh, Peter; Amir, Ori; Gratch, Jonathan
Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 2224–2226, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
@inproceedings{de_melo_shaping_2018,
title = {Shaping Cooperation between Humans and Agents with Emotion Expressions and Framing},
author = {Celso M. Melo and Peter Khooshabeh and Ori Amir and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3238129},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {2224–2226},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Stockholm, Sweden},
abstract = {Emotion expressions can help solve social dilemmas where individual interest is pitted against the collective interest. Building on research that shows that emotions communicate intentions to others, we reinforce that people can infer whether emotionally expressive computer agents intend to cooperate or compete. We further show important distinctions between computer agents that are perceived to be driven by humans (i.e., avatars) vs. by algorithms (i.e., agents). Our results reveal that, when the emotion expression reflects an intention to cooperate, participants will cooperate more with avatars than with agents; however, when the emotion reflects an intention to compete, participants cooperate just as little with avatars as with agents. Finally, we present first evidence that the way the dilemma is described - or framed - can influence people's decision-making. We discuss implications for the design of autonomous agents that foster cooperation with humans, beyond what game theory predicts in social dilemmas.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Rovira, Ericka; Barnes, Michael J.
A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction Proceedings Article
In: Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR), Association for the Advancement of Artificial Intelligence, London, UK, 2018.
@inproceedings{pynadath_nearest-neighbor_2018,
title = {A Nearest-Neighbor Approach to Recognizing Subjective Beliefs in Human-Robot Interaction},
author = {David V. Pynadath and Ning Wang and Ericka Rovira and Michael J. Barnes},
url = {https://aied2018.utscic.edu.au/proceedings/},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of The AAAI Workshop on Plan, Activity, and Intent Recognition (PAIR)},
publisher = {Association for the Advancement of Artificial Intelligence},
address = {London, UK},
abstract = {Trust is critical to the success of human-robot interaction (HRI), and one of the critical antecedents to trust is transparency. To best interact with human teammates, a robot must be able to ensure that they understand its decision-making process. Recent work has developed automated explanation methods that can achieve this goal. However, individual differences among human teammates require that the robot dynamically adjust its explanation strategy based on their unobservable subjective beliefs. We therefore need methods by which a robot can recognize its teammates’ subjective beliefs relevant to trust-building (e.g., their understanding of the robot’s capabilities and process). We leverage a nonparametric method, common across many fields of artificial intelligence, to enable a robot to use its history of prior interactions as a means for recognizing and predicting a new teammate’s subjective beliefs. We first gather data combining observable behavior sequences with survey-based observations of typically unobservable subjective beliefs. We then use a nearest-neighbor approach to identify the prior teammates most similar to the new one. We use these neighbors to infer the likelihood of possible subjective beliefs, and the results provide insights into the types of subjective beliefs that are easy (and hard) to infer from purely behavioral observations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Monahan, Shannon; Johnson, Emmanuel; Lucas, Gale; Finch, James; Gratch, Jonathan
Autonomous Agent that Provides Automated Feedback Improves Negotiation Skills Book Section
In: Artificial Intelligence in Education, vol. 10948, pp. 225–229, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-93845-5 978-3-319-93846-2.
@incollection{monahan_autonomous_2018,
title = {Autonomous Agent that Provides Automated Feedback Improves Negotiation Skills},
author = {Shannon Monahan and Emmanuel Johnson and Gale Lucas and James Finch and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-93846-2_41},
doi = {10.1007/978-3-319-93846-2_41},
isbn = {978-3-319-93845-5 978-3-319-93846-2},
year = {2018},
date = {2018-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {10948},
pages = {225–229},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Research has found that individuals can improve their negotiation abilities by practicing with virtual agents [1, 2]. For these pedagogical agents to become more “intelligent,” the system should be able to give feedback on negotiation performance [3, 4]. In this study, we examined the impact of providing such individualized feedback. Participants first engaged in a negotiation with a virtual agent. After this negotiation, participants were either given automated individualized feedback or not. Feedback was based on negotiation principles [4], which were quantified using a validated approach [5]. Participants then completed a second, parallel negotiation. Our results show that, compared to the control condition, participants who received such feedback after the first negotiation showed a significantly greater improvement in the strength of their first offer, concession curve, and thus their final outcome in the negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V.; Barnes, Michael J.; Wang, Ning; Chen, Jessie Y. C.
Transparency Communication for Machine Learning in Human-Automation Interaction Book Section
In: Human and Machine Learning, pp. 75–90, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-90402-3 978-3-319-90403-0.
@incollection{pynadath_transparency_2018,
title = {Transparency Communication for Machine Learning in Human-Automation Interaction},
author = {David V. Pynadath and Michael J. Barnes and Ning Wang and Jessie Y. C. Chen},
url = {http://link.springer.com/10.1007/978-3-319-90403-0_5},
doi = {10.1007/978-3-319-90403-0_5},
isbn = {978-3-319-90402-3 978-3-319-90403-0},
year = {2018},
date = {2018-06-01},
booktitle = {Human and Machine Learning},
pages = {75–90},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Technological advances offer the promise of autonomous systems to form human-machine teams that are more capable than their individual members. Understanding the inner workings of the autonomous systems, especially as machine-learning (ML) methods are being widely applied to the design of such systems, has become increasingly challenging for the humans working with them. The “black-box” nature of quantitative ML approaches poses an impediment to people’s situation awareness (SA) of these ML-based systems, often resulting in either disuse or over-reliance of autonomous systems employing such algorithms. Research in human-automation interaction has shown that transparency communication can improve teammates’ SA, foster the trust relationship, and boost the human-automation team’s performance. In this chapter, we will examine the implications of an agent transparency model for human interactions with ML-based agents using automated explanations. We will discuss the application of a particular ML method, reinforcement learning (RL), in Partially Observable Markov Decision Process (POMDP)-based agents, and the design of explanation algorithms for RL in POMDPs.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V; Wang, Ning; Yang, Richard
Simulating Collaborative Learning through Decision-Theoretic Agents Proceedings Article
In: Proceedings of the Assessment and Intervention during Team Tutoring Workshop, CEUR-WS.org, London, UK, 2018.
@inproceedings{pynadath_simulating_2018,
title = {Simulating Collaborative Learning through Decision-Theoretic Agents},
author = {David V Pynadath and Ning Wang and Richard Yang},
url = {http://ceur-ws.org/Vol-2153/paper5.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the Assessment and Intervention during Team Tutoring Workshop},
publisher = {CEUR-WS.org},
address = {London, UK},
abstract = {Simulation for team training has a long history of success in medical care and emergency response. In fields where individuals work together to make decisions and perform actions under extreme time pressure and risk (as in military teams), simulations offer safe and repeatable environments for teams to learn and practice without real-world consequences. In our team-based training simulation, we use intelligent agents to represent individual learners and to autonomously generate behavior while learning to perform a joint task. Our agents are built upon PsychSim, a social-simulation framework that uses decision theory to provide domain-independent, quantitative algorithms for representing and reasoning about uncertainty and conflicting goals. We present a collaborative learning testbed in which two PsychSim agents performed a joint “capture-the-flag” mission in the presence of an enemy agent. The testbed supports a reinforcement-learning capability that enables the agents to revise their decision-theoretic models based on their experiences in performing the target task. We can “train” these agents by having them repeatedly perform the task and refine their models through reinforcement learning. We can then “test” the agents by measuring their performance once their learning has converged to a final policy. Repeating this train-and-test cycle across different parameter settings (e.g., priority of individual vs. team goals) and learning configurations (e.g., train with the same teammate vs. train with different teammates) yields a reusable methodology for characterizing the learning outcomes and measuring the impact of such variations on training effectiveness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Huynh, Loc; Chen, Weikai; Saito, Shunsuke; Xing, Jun; Nagano, Koki; Jones, Andrew; Debevec, Paul; Li, Hao
Mesoscopic Facial Geometry Inference Using Deep Neural Networks Proceedings Article
In: Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition, IEEE, Salt Lake City, UT, 2018.
@inproceedings{huynh_mesoscopic_2018,
title = {Mesoscopic Facial Geometry Inference Using Deep Neural Networks},
author = {Loc Huynh and Weikai Chen and Shunsuke Saito and Jun Xing and Koki Nagano and Andrew Jones and Paul Debevec and Hao Li},
url = {http://openaccess.thecvf.com/content_cvpr_2018/papers/Huynh_Mesoscopic_Facial_Geometry_CVPR_2018_paper.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition},
publisher = {IEEE},
address = {Salt Lake City, UT},
abstract = {We present a learning-based approach for synthesizing facial geometry at medium and fine scales from diffusely-lit facial texture maps. When applied to an image sequence, the synthesized detail is temporally coherent. Unlike current state-of-the-art methods [17, 5], which assume “dark is deep”, our model is trained with measured facial detail collected using polarized gradient illumination in a Light Stage [20]. This enables us to produce plausible facial detail across the entire face, including where previous approaches may incorrectly interpret dark features as concavities such as at moles, hair stubble, and occluded pores. Instead of directly inferring 3D geometry, we propose to encode fine details in high-resolution displacement maps which are learned through a hybrid network adopting the state-of-the-art image-to-image translation network [29] and super resolution network [43]. To effectively capture geometric detail at both mid- and high frequencies, we factorize the learning into two separate sub-networks, enabling the full range of facial detail to be modeled. Results from our learning-based approach compare favorably with a high-quality active facial scanning technique, and require only a single passive lighting condition without a complex scanning setup.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Schwartz, David; Goldberg, Stephen L.
An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger Proceedings Article
In: Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE), pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@inproceedings{wang_analysis_2018,
title = {An Analysis of Student Belief and Behavior in Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and David Schwartz and Stephen L. Goldberg},
url = {http://ceur-ws.org/Vol-2141/paper3.pdf},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the AIED Workshop on Personalized Approaches in Learning Environments (PALE)},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Using a low-cost and high-speed computer graphics and character animation technology, we created digital doppelgangers of students and placed them in a learning-by-explaining task where they interacted with digital doppelgangers of themselves. We investigate the research question of how increasing the similarity in physical appearance between the agent and the student impacts learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual human listener in a learning-by-explaining paradigm. It presents an analysis of how students’ perceptions of the resemblance impact their learning experience and outcomes. The analysis and results offer insight into the promise and limitation of the application of this novel technology to pedagogical agents research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew
An Encoder-decoder Approach to Predicting Causal Relations in Stories Proceedings Article
In: Proceedings of the First Workshop on Storytelling, pp. 50–59, Association for Computational Linguistics, New Orleans, LA, 2018.
@inproceedings{roemmele_encoder-decoder_2018,
title = {An Encoder-decoder Approach to Predicting Causal Relations in Stories},
author = {Melissa Roemmele and Andrew Gordon},
url = {http://aclweb.org/anthology/W18-1506},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the First Workshop on Storytelling},
pages = {50–59},
publisher = {Association for Computational Linguistics},
address = {New Orleans, LA},
abstract = {We address the task of predicting causally related events in stories according to a standard evaluation framework, the Choice of Plausible Alternatives (COPA). We present a neural encoder-decoder model that learns to predict relations between adjacent sequences in stories as a means of modeling causality. We explore this approach using different methods for extracting and representing sequence pairs as well as different model architectures. We also compare the impact of different training datasets on our model. In particular, we demonstrate the usefulness of a corpus not previously applied to COPA, the ROCStories corpus. While not state-of-the-art, our results establish a new reference point for systems evaluated on COPA, and one that is particularly informative for future neural-based approaches.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
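The COPA decision rule itself is simple to state in code: given a premise and two alternatives, choose the alternative the model scores as more plausibly related. The Python sketch below substitutes a trivial word-overlap scorer for the paper's trained encoder-decoder, purely to show the evaluation setup; the example premise and alternatives are invented.

def score(premise, alternative):
    # Stand-in for the trained model's score of how plausibly the
    # alternative relates to the premise; here, just word overlap.
    p = set(premise.lower().rstrip(".").split())
    a = set(alternative.lower().rstrip(".").split())
    return len(p & a) / len(a)

def choose(premise, alt1, alt2):
    # COPA decision rule: pick the more plausible of two alternatives.
    return alt1 if score(premise, alt1) >= score(premise, alt2) else alt2

premise = "It started to rain."
print(choose(premise, "The streets got soaked by the rain.",
             "The streets were repaved."))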
Roemmele, Melissa; Gordon, Andrew
Linguistic Features of Helpfulness in Automated Support for Creative Writing Proceedings Article
In: Proceedings of the First Workshop on Storytelling, pp. 14–19, Association for Computational Linguistics, New Orleans, LA, 2018.
@inproceedings{roemmele_linguistic_2018,
title = {Linguistic Features of Helpfulness in Automated Support for Creative Writing},
author = {Melissa Roemmele and Andrew Gordon},
url = {http://aclweb.org/anthology/W18-1502},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the First Workshop on Storytelling},
pages = {14–19},
publisher = {Association for Computational Linguistics},
address = {New Orleans, LA},
abstract = {We examine an emerging NLP application that supports creative writing by automatically suggesting continuing sentences in a story. The application tracks users’ modifications to generated sentences, which can be used to quantify their “helpfulness” in advancing the story. We explore the task of predicting helpfulness based on automatically detected linguistic features of the suggestions. We illustrate this analysis on a set of user interactions with the application using an initial selection of features relevant to story generation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Karumbaiah, Shamya; Tokel, S. Tugba; Core, Mark G.; Stratou, Giota; Auerbach, Daniel; Georgila, Kallirroi
Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 352–366, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93842-4 978-3-319-93843-1.
@inproceedings{nye_engaging_2018,
title = {Engaging with the Scenario: Affect and Facial Patterns from a Scenario-Based Intelligent Tutoring System},
author = {Benjamin D. Nye and Shamya Karumbaiah and S. Tugba Tokel and Mark G. Core and Giota Stratou and Daniel Auerbach and Kallirroi Georgila},
url = {http://link.springer.com/10.1007/978-3-319-93843-1_26},
doi = {10.1007/978-3-319-93843-1_26},
isbn = {978-3-319-93842-4 978-3-319-93843-1},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10947},
pages = {352–366},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Facial expression trackers output measures for facial action units (AUs), and are increasingly being used in learning technologies. In this paper, we compile patterns of AUs seen in related work as well as use factor analysis to search for categories implicit in our corpus. Although there was some overlap between the factors in our data and previous work, we also identified factors seen in the broader literature but not previously reported in the context of learning environments. In a correlational analysis, we found evidence for relationships between factors and self-reported traits such as academic effort, study habits, and interest in the subject. In addition, we saw differences in average levels of factors between a video watching activity, and a decision making activity. However, in this analysis, we were not able to isolate any facial expressions having a significant positive or negative relationship with either learning gain, or performance once question difficulty and related factors were also considered. Given the overall low levels of facial affect in the corpus, further research will explore different populations and learning tasks to test the possible hypothesis that learners may have been in a pattern of “Over-Flow” in which they were engaged with the system, but not deeply thinking about the content or their errors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hampton, Andrew J.; Nye, Benjamin D.; Pavlik, Philip I.; Swartout, William R.; Graesser, Arthur C.; Gunderson, Joseph
Mitigating Knowledge Decay from Instruction with Voluntary Use of an Adaptive Learning System Proceedings Article
In: Proceedings of the International Conference on Artificial Intelligence in Education, pp. 119–133, Springer International Publishing, London, UK, 2018, ISBN: 978-3-319-93845-5 978-3-319-93846-2.
@inproceedings{hampton_mitigating_2018,
title = {Mitigating Knowledge Decay from Instruction with Voluntary Use of an Adaptive Learning System},
author = {Andrew J. Hampton and Benjamin D. Nye and Philip I. Pavlik and William R. Swartout and Arthur C. Graesser and Joseph Gunderson},
url = {http://link.springer.com/10.1007/978-3-319-93846-2_23},
doi = {10.1007/978-3-319-93846-2_23},
isbn = {978-3-319-93845-5 978-3-319-93846-2},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the International Conference on Artificial Intelligence in Education},
volume = {10948},
pages = {119–133},
publisher = {Springer International Publishing},
address = {London, UK},
abstract = {Knowledge decays across breaks in instruction. Learners lack the metacognition to self-assess their knowledge decay and effectively self-direct review, as well as lacking interactive exercises appropriate to their individual knowledge level. Adaptive learning systems offer the potential to mitigate these issues, by providing open learner models to facilitate learner’s understanding of their knowledge levels and by presenting personalized practice exercises. The current study analyzes differences in knowledge decay between learners randomly assigned to an intervention where they could use an adaptive system during a long gap between courses, compared with a control condition. The experimental condition used the Personal Assistant for Life-Long Learning (PAL3), a tablet-based adaptive learning system integrating multiple intelligent tutoring systems and conventional learning resources. It contained electronics content relevant to the experiment participants, Navy sailors who graduated from apprentice electronics courses (A-School) awaiting assignment to their next training (C-School). The study was conducted over one month, collecting performance data with a counterbalanced pre-, mid-, and post-test. The control condition exhibited the expected decay. The PAL3 condition showed a significant difference from the control, with no significant knowledge decay in their overall knowledge, despite substantial variance in usage for PAL3 (e.g., most of overall use in the first week, with fewer participants engaging as time went on). Interestingly, while overall decay was mitigated in PAL3, this result was primarily through gains in some knowledge offsetting losses in other knowledge. Overall, these results indicate that adaptive study tools can help prevent knowledge decay, even with voluntary usage.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy; Merchant, Chirag; Schwartz, David; Goldberg, Stephen L.
Learning by Explaining to a Digital Doppelganger Book Section
In: Intelligent Tutoring Systems, vol. 10858, pp. 256–264, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-91463-3 978-3-319-91464-0.
@incollection{wang_learning_2018,
title = {Learning by Explaining to a Digital Doppelganger},
author = {Ning Wang and Ari Shapiro and Andrew Feng and Cindy Zhuang and Chirag Merchant and David Schwartz and Stephen L. Goldberg},
url = {http://link.springer.com/10.1007/978-3-319-91464-0_25},
doi = {10.1007/978-3-319-91464-0_25},
isbn = {978-3-319-91463-3 978-3-319-91464-0},
year = {2018},
date = {2018-05-01},
booktitle = {Intelligent Tutoring Systems},
volume = {10858},
pages = {256–264},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. An emerging computer animation technology makes the creation of digital doppelgangers an accessible reality. This allows researchers in pedagogical agents to explore previously unexplorable research questions, such as how does increasing the similarity in appearance between the agent and the student impact learning. This paper discusses the design and evaluation of a digital doppelganger as a virtual listener in a learning-by-explaining paradigm. Results offer insight into the promise and limitation of this novel technology.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Brixey, Jacqueline; Pincus, Eli; Artstein, Ron
Chahta Anumpa: A Multimodal Corpus of the Choctaw Language Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 3371–3376, ELRA, Miyazaki, Japan, 2018.
@inproceedings{brixey_chahta_2018,
title = {Chahta Anumpa: A Multimodal Corpus of the Choctaw Language},
author = {Jacqueline Brixey and Eli Pincus and Ron Artstein},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/822.html},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {3371–3376},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {This paper presents a general use corpus for the Native American indigenous language Choctaw. The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for the threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Henry, Cassidy; Lukin, Stephanie; Artstein, Ron; Gervits, Felix; Pollard, Kim; Bonial, Claire; Lei, Su; Voss, Clare R.; Marge, Matthew; Hayes, Cory J.; Hill, Susan G.
Dialogue Structure Annotation for Multi-Floor Interaction Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), pp. 104–111, ELRA, Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
@inproceedings{traum_dialogue_2018,
title = {Dialogue Structure Annotation for Multi-Floor Interaction},
author = {David Traum and Cassidy Henry and Stephanie Lukin and Ron Artstein and Felix Gervits and Kim Pollard and Claire Bonial and Su Lei and Clare R. Voss and Matthew Marge and Cory J. Hayes and Susan G. Hill},
url = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/672.html},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
pages = {104–111},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {We present an annotation scheme for meso-level dialogue structure, specifically designed for multi-floor dialogue. The scheme includes a transaction unit that clusters utterances from multiple participants and floors into units according to realization of an initiator’s intent, and relations between individual utterances within the unit. We apply this scheme to annotate a corpus of multi-floor human-robot interaction dialogues. We examine the patterns of structure observed in these dialogues and present inter-annotator statistics and relative frequencies of types of relations and transaction units. Finally, some example applications of these annotations are introduced.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Kim, Doo Soon; Artstein, Ron; Georgila, Kallirroi
Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing Proceedings Article
In: Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC), ELRA, Miyazaki, Japan, 2018.
@inproceedings{manuvinakurike_edit_2018,
title = {Edit me: A Corpus and a Framework for Understanding Natural Language Image Editing},
author = {Ramesh Manuvinakurike and Jacqueline Brixey and Trung Bui and Walter Chang and Doo Soon Kim and Ron Artstein and Kallirroi Georgila},
url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/481.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 11th International Conference on Language Resources and Evaluation (LREC)},
publisher = {ELRA},
address = {Miyazaki, Japan},
abstract = {This paper introduces the task of interacting with an image editing program through natural language. We present a corpus of image edit requests which were elicited for real world images, and an annotation framework for understanding such natural language instructions and mapping them to actionable computer commands. Finally, we evaluate crowd-sourced annotation as a means of efficiently creating a sizable corpus at a reasonable cost.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2013
Song, Yale; Morency, Louis-Philippe; Davis, Randall
Distribution-Sensitive Learning for Imbalanced Datasets Proceedings Article
In: IEEE Conference on Automatic Face and Gesture Recognition, Shanghai, China, 2013.
@inproceedings{song_distribution-sensitive_2013,
title = {Distribution-Sensitive Learning for Imbalanced Datasets},
author = {Yale Song and Louis-Philippe Morency and Randall Davis},
url = {http://ict.usc.edu/pubs/Distribution-Sensitive%20Learning%20for%20Imbalanced%20Datasets.pdf},
year = {2013},
date = {2013-04-01},
booktitle = {IEEE Conference on Automatic Face and Gesture Recognition},
address = {Shanghai, China},
abstract = {Many real-world face and gesture datasets are by nature imbalanced across classes. Conventional statistical learning models (e.g., SVM, HMM, CRF), however, are sensitive to imbalanced datasets. In this paper we show how an imbalanced dataset affects the performance of a standard learning algorithm, and propose a distribution-sensitive prior to deal with the imbalanced data problem. This prior analyzes the training dataset before learning a model, and puts more weight on the samples from underrepresented classes, allowing all samples in the dataset to have a balanced impact in the learning process. We report on two empirical studies regarding learning with imbalanced data, using two publicly available recent gesture datasets, the Microsoft Research Cambridge-12 (MSRC-12) and NATOPS aircraft handling signals datasets. Experimental results show that learning from balanced data is important, and that the distribution-sensitive prior improves performance with imbalanced datasets.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
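The "distribution-sensitive prior" described above amounts to weighting training samples so that underrepresented classes are not drowned out. A minimal Python sketch of that general idea, using inverse class-frequency weights (one simple reading, not necessarily the paper's exact formulation):

from collections import Counter

def balanced_sample_weights(labels):
    # Weight each sample inversely to its class frequency so every class
    # contributes equally to the training objective.
    counts = Counter(labels)
    n, k = len(labels), len(counts)
    return [n / (k * counts[y]) for y in labels]

labels = ["wave"] * 90 + ["salute"] * 10   # imbalanced toy gesture dataset
weights = balanced_sample_weights(labels)
print(weights[0], weights[-1])   # ~0.56 for the majority class, 5.0 for the minority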
Gratch, Jonathan; Cheng, Lin; Marsella, Stacy C.; Boberg, Jill
Felt Emotion and Social Context Determine the Intensity of Smiles in a Competitive Video Game Proceedings Article
In: 10th IEEE International Conference on Automatic Face and Gesture Recognition, Shanghai, China, 2013.
@inproceedings{gratch_felt_2013,
title = {Felt Emotion and Social Context Determine the Intensity of Smiles in a Competitive Video Game},
author = {Jonathan Gratch and Lin Cheng and Stacy C. Marsella and Jill Boberg},
url = {http://ict.usc.edu/pubs/Felt%20Emotion%20and%20Social%20Context%20Determine%20the%20Intensity%20of%20Smiles%20in%20a%20Competitive%20Video%20Game.pdf},
year = {2013},
date = {2013-04-01},
booktitle = {10th IEEE International Conference on Automatic Face and Gesture Recognition},
address = {Shanghai, China},
abstract = {The present study uses automatic facial expression recognition software to examine the relationship between social context and emotional feelings on the expression of emotion, to test claims that facial expressions reflect social motives rather than felt emotion. To vary emotional feelings, participants engaged in a competitive video game. Deception was used to systematically manipulate perceptions of winning or losing. To vary social context, participants played either with friends or strangers. The results support the hypothesis of Hess and colleagues that smiling is determined by both factors. The results further highlight the value of automatic expression recognition technology for psychological research and provide constraints on inferring emotion from facial displays.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Stratou, Giota; Mahmoud, Marwa; Boberg, Jill; Gratch, Jonathan; Rizzo, Albert; Morency, Louis-Philippe
Automatic Behavior Descriptors for Psychological Disorder Analysis Proceedings Article
In: IEEE Conference on Automatic Face and Gesture Recognition, Shanghai, China, 2013.
@inproceedings{scherer_automatic_2013,
title = {Automatic Behavior Descriptors for Psychological Disorder Analysis},
author = {Stefan Scherer and Giota Stratou and Marwa Mahmoud and Jill Boberg and Jonathan Gratch and Albert Rizzo and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Automatic%20Behavior%20Descriptors%20for%20Psychological%20Disorder%20Analysis.pdf},
year = {2013},
date = {2013-04-01},
booktitle = {IEEE Conference on Automatic Face and Gesture Recognition},
address = {Shanghai, China},
abstract = {We investigate the capabilities of automatic nonverbal behavior descriptors to identify indicators of psychological disorders such as depression, anxiety, and post-traumatic stress disorder. We seek to confirm and enrich present state of the art, predominantly based on qualitative manual annotations, with automatic quantitative behavior descriptors. In this paper, we propose four nonverbal behavior descriptors that can be automatically estimated from visual signals. We introduce a new dataset called the Distress Assessment Interview Corpus (DAIC) which includes 167 dyadic interactions between a confederate interviewer and a paid participant. Our evaluation on this dataset shows correlation of our automatic behavior descriptors with specific psychological disorders as well as a generic distress measure. Our analysis also includes a deeper study of self-adaptor and fidgeting behaviors based on detailed annotations of where these behaviors occur.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Xu, Yuyu; Feng, Andrew W.; Shapiro, Ari
A Simple Method for High Quality Artist-Driven Lip Syncing Proceedings Article
In: ACM Symposium on Interactive 3D Graphics and Games, Orlando, FL, 2013.
@inproceedings{xu_simple_2013,
title = {A Simple Method for High Quality Artist-Driven Lip Syncing},
author = {Yuyu Xu and Andrew W. Feng and Ari Shapiro},
url = {http://ict.usc.edu/pubs/A%20Simple%20Method%20for%20High%20Quality%20Artist-Driven%20Lip%20Syncing.pdf},
year = {2013},
date = {2013-03-01},
booktitle = {ACM Symposium on Interactive 3D Graphics and Games},
address = {Orlando, FL},
abstract = {We demonstrate a real-time lip animation algorithm that can be used to generate synchronized facial movements with audio generated from a text-to-speech engine or from recorded audio. Our method requires an animator to construct animations using a canonical set of visemes for all pairwise combinations of a reduced phoneme set (diphones). The diphone animations are then stitched together to construct the final animation. This method can be easily retargeted to new faces that use the same set of visemes. Thus, our method can be applied to any character that utilizes the same, small set of facial poses. In addition, our method is editable in that it allows an artist to directly and easily change specific parts of the lip animation algorithm as needed. Our method requires no learning, can work on multiple languages, and is easily replicated. We make publicly available animations for lip syncing English utterances. We present a study showing the subjective quality of our algorithm, and compare it to the results of a popular commercial software package.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
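To illustrate the diphone idea: an artist authors one short viseme clip for each phoneme pair, and runtime lip sync reduces to concatenating those clips. The Python sketch below is deliberately simplified; the phoneme labels and clip names are invented, and a real system would blend animation curves at the seams rather than append lists.

# Hypothetical artist-authored viseme clips, one per phoneme pair (diphone).
DIPHONE_CLIPS = {
    ("HH", "EH"): ["jaw_open_25", "jaw_open_60"],
    ("EH", "L"):  ["jaw_open_60", "tongue_up_80"],
    ("L", "OW"):  ["tongue_up_80", "lips_round_70"],
}

def lip_sync(phonemes):
    # Stitch the per-diphone clips for a phoneme sequence into one track.
    track = []
    for pair in zip(phonemes, phonemes[1:]):
        track.extend(DIPHONE_CLIPS[pair])
    return track

print(lip_sync(["HH", "EH", "L", "OW"]))  # phoneme sequence for "hello"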
Vasylevska, Khrystyna; Kaufmann, Hannes; Bolas, Mark; Suma, Evan
Flexible Spaces: Dynamic Layout Generation for Infinite Walking in Virtual Environments Proceedings Article
In: IEEE Symposium on 3D User Interfaces, Orlando, FL, 2013.
@inproceedings{vasylevska_flexible_2013,
title = {Flexible Spaces: Dynamic Layout Generation for Infinite Walking in Virtual Environments},
author = {Khrystyna Vasylevska and Hannes Kaufmann and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Flexible%20Spaces-%20Dynamic%20Layout%20Generation%20for%20Infinite%20Walking%20in%20Virtual%20Environments.pdf},
year = {2013},
date = {2013-03-01},
booktitle = {IEEE Symposium on 3D User Interfaces},
address = {Orlando, FL},
abstract = {Redirected walking techniques enable natural locomotion through immersive virtual environments (VEs) that are larger than the real world workspace. Most existing techniques rely upon manipulating the mapping between physical and virtual motions while the layout of the environment remains constant. However, if the primary focus of the experience is on the virtual world’s content, rather than on its spatial layout, then the goal of redirected walking can be achieved through an entirely different strategy. In this paper, we introduce flexible spaces – a novel redirection technique that enables infinite real walking in virtual environments that do not require replication of real world layouts. Flexible spaces overcome the limitations and generalize the use of overlapping (impossible) spaces and change blindness by employing procedural layout generation. Our approach allows VE designers to focus on the content of the virtual world independent of the implementation details imposed by real walking, thereby making spatial manipulation techniques more practical for use in a variety of application domains.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Vasylevska, Khrystyna; Kaufmann, Hannes; Bolas, Mark; Suma, Evan
Flexible Spaces: A Virtual Step Outside of Reality Proceedings Article
In: IEEE Virtual Reality, Orlando, FL, 2013.
@inproceedings{vasylevska_flexible_2013-1,
title = {Flexible Spaces: A Virtual Step Outside of Reality},
author = {Khrystyna Vasylevska and Hannes Kaufmann and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Flexible%20Spaces-%20A%20Virtual%20Step%20Outside%20of%20Reality.pdf},
year = {2013},
date = {2013-03-01},
booktitle = {IEEE Virtual Reality},
address = {Orlando, FL},
abstract = {In this paper we introduce the concept of flexible spaces – a novel redirection technique that generalizes the use of overlapping (impossible) spaces and change blindness in an algorithm for dynamic layout generation. Flexible spaces is an impossible environment that violates the real world constancy in favor of providing the experience of seamless, unrestricted natural walking over a large-scale virtual environment (VE).},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Buckwalter, John Galen; Forbell, Eric; Reist, Chris; Difede, JoAnn; Rothbaum, Barbara O.; Lange, Belinda; Koenig, Sebastian; Talbot, Thomas
Virtual Reality Applications to Address the Wounds of War Journal Article
In: Psychiatric Annals, vol. 43, no. 3, pp. 123–138, 2013.
@article{rizzo_virtual_2013-2,
title = {Virtual Reality Applications to Address the Wounds of War},
author = {Albert Rizzo and John Galen Buckwalter and Eric Forbell and Chris Reist and JoAnn Difede and Barbara O. Rothbaum and Belinda Lange and Sebastian Koenig and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Applications%20to%20Address%20the%20Wounds%20of%20War.pdf},
year = {2013},
date = {2013-03-01},
journal = {Psychiatric Annals},
volume = {43},
number = {3},
pages = {123–138},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Shapiro, Ari; Feng, Andrew W.
The Case for Physics Visualization in an Animator's Toolset Proceedings Article
In: 8th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, Barcelona, Spain, 2013.
@inproceedings{shapiro_case_2013,
title = {The Case for Physics Visualization in an Animator's Toolset},
author = {Ari Shapiro and Andrew W. Feng},
url = {http://ict.usc.edu/pubs/The%20Case%20for%20Physics%20Visualization%20in%20an%20Animator's%20Toolset.pdf},
year = {2013},
date = {2013-02-01},
booktitle = {8th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
address = {Barcelona, Spain},
abstract = {By associating physical properties with a digital character's joints and bones, we are able to visualize explicitly a number of properties that can help animators develop high-quality animation. For example, proper ballistic arcs can be shown to demonstrate proper timing and location of a character during flight. In addition, a center of mass that accurately reflects the posture of the character can be shown to help with a balanced appearance during walking or running. In addition, motion properties not previously considered, such as angular momentum, can be easily identified when blatantly violated by an animator. However, very few in-house or commercial systems employ such tools, despite their nearly transparent use in an animator's workflow and their utility in generating better-quality motion. In this paper, we argue the case for incorporating such a toolset, describe an algorithm for implementing the tools, and detail the types of uses for such a tool.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tsai, Jason; Bowring, Emma; Marsella, Stacy C.; Tambe, Milind
Empirical evaluation of computational fear contagion models in crowd dispersions Journal Article
In: Journal of Autonomous Agents and Multi-Agent Systems, 2013.
@article{tsai_empirical_2013,
title = {Empirical evaluation of computational fear contagion models in crowd dispersions},
author = {Jason Tsai and Emma Bowring and Stacy C. Marsella and Milind Tambe},
url = {http://ict.usc.edu/pubs/Empirical%20evaluation%20of%20computational%20fear%20contagion%20models%20in%20crowd%20dispersions.pdf},
year = {2013},
date = {2013-02-01},
journal = {Journal of Autonomous Agents and Multi-Agent Systems},
abstract = {In social psychology, emotional contagion describes the widely observed phenomenon of one person’s emotions being influenced by surrounding people’s emotions. While the overall effect is agreed upon, the underlying mechanism of the spread of emotions has seen little quantification and application to computational agents despite extensive evidence of its impacts in everyday life. In this paper, we examine computational models of emotional contagion by implementing two models (Bosse et al., European Council on Modeling and Simulation, pp. 212–218, 2009; Durupinar, From Audiences to Mobs: Crowd Simulation with Psychological Factors, PhD dissertation, Bilkent University, 2010) that draw from two separate lines of contagion research: thermodynamics-based and epidemiological-based. We first perform sensitivity tests on each model in an evacuation simulation, ESCAPES, showing both models to be reasonably robust to parameter variations with certain exceptions. We then compare their ability to reproduce a real crowd panic scene in simulation, showing that the thermodynamics-style model (Bosse et al., 2009) produces superior results due to the ill-suited contagion mechanism at the core of epidemiological models. We also identify that a graduated effect of fear and proximity-based contagion effects are key to producing the superior results. We then reproduce the methodology on a second video, showing that the same results hold, implying generality of the conclusions reached in the first scene.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
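For readers unfamiliar with the two model families compared above, the thermodynamics-style mechanism can be caricatured in a few lines: each agent's fear drifts continuously toward the mean fear of nearby agents (a graduated effect), rather than flipping between infected and uninfected states as in epidemiological models. A toy Python sketch under those assumptions, not either paper's actual equations:

def step(fear, neighbors, alpha=0.3):
    # One contagion update: each agent's fear moves a fraction alpha toward
    # the mean fear of its neighbors, then is clamped to [0, 1].
    new = []
    for i, f in enumerate(fear):
        nbrs = neighbors[i]
        if nbrs:
            mean = sum(fear[j] for j in nbrs) / len(nbrs)
            f += alpha * (mean - f)
        new.append(min(1.0, max(0.0, f)))
    return new

fear = [1.0, 0.0, 0.0, 0.0]                         # agent 0 panics first
neighbors = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}  # agents in a line
for _ in range(5):
    fear = step(fear, neighbors)
print([round(f, 2) for f in fear])                  # fear diffuses down the line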
Gratch, Jonathan; Morency, Louis-Philippe; Scherer, Stefan; Stratou, Giota; Boberg, Jill; Koenig, Sebastian; Adamson, Todd; Rizzo, Albert
User-State Sensing for Virtual Health Agents and TeleHealth Applications Proceedings Article
In: Medicine Meets Virtual Reality, San Diego, CA, 2013.
@inproceedings{gratch_user-state_2013,
title = {User-State Sensing for Virtual Health Agents and TeleHealth Applications},
author = {Jonathan Gratch and Louis-Philippe Morency and Stefan Scherer and Giota Stratou and Jill Boberg and Sebastian Koenig and Todd Adamson and Albert Rizzo},
url = {http://ict.usc.edu/pubs/User-State%20Sensing%20for%20Virtual%20Health%20Agents%20and%20TeleHealth%20Applications.pdf},
year = {2013},
date = {2013-02-01},
booktitle = {Medicine Meets Virtual Reality},
address = {San Diego, CA},
abstract = {Nonverbal behaviors play a crucial role in shaping outcomes in face-to-face clinical interactions. Experienced clinicians use nonverbals to foster rapport and “read” their clients to inform diagnoses. The rise of telemedicine and virtual health agents creates new opportunities, but it also strips away much of this nonverbal channel. Recent advances in low-cost computer vision and sensing technologies have the potential to address this challenge by learning to recognize nonverbal cues from large datasets of clinical interactions. These techniques can enhance both telemedicine and the emerging technology of virtual health agents. This article describes our current research in addressing these challenges in the domain of PTSD and depression screening for U.S. Veterans. We describe our general approach and report on our initial contribution: the creation of a large dataset of clinical interview data that facilitates the training of user-state sensing technology.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; John, Bruce Sheffield; Newman, Brad; Williams, Josh; Hartholt, Arno; Lethin, Clarke; Buckwalter, John Galen
Virtual Reality as a Tool for Delivering PTSD Exposure Therapy and Stress Resilience Training Journal Article
In: Military Behavioral Health, vol. 1, pp. 48–54, 2013.
@article{rizzo_virtual_2013-1,
title = {Virtual Reality as a Tool for Delivering PTSD Exposure Therapy and Stress Resilience Training},
author = {Albert Rizzo and Bruce Sheffield John and Brad Newman and Josh Williams and Arno Hartholt and Clarke Lethin and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20as%20a%20Tool%20for%20Delivering%20PTSD%20Exposure%20Therapy%20and%20Stress%20Resilience%20Training.pdf},
doi = {10.1080/21635781.2012.721064},
year = {2013},
date = {2013-01-01},
journal = {Military Behavioral Health},
volume = {1},
pages = {48–54},
abstract = {The incidence of post-traumatic stress disorder (PTSD) in returning Operation Enduring Freedom and Operation Iraqi Freedom military personnel has created a significant behavioral health care challenge. One emerging form of treatment for combat-related PTSD that has shown promise involves the delivery of exposure therapy using immersive virtual reality (VR). Initial outcomes from open clinical trials have been positive, and fully randomized controlled trials are currently in progress. Inspired by the initial success of our research using VR to emotionally engage and successfully treat persons undergoing exposure therapy for PTSD, we have developed a similar VR-based approach to deliver resilience training prior to an initial deployment. The STress Resilience In Virtual Environments (STRIVE) project aims to create a set of combat simulations (derived from our existing virtual Iraq/Afghanistan PTSD exposure therapy system) that are part of a multiepisode interactive narrative experience. Users can be immersed within challenging virtual combat contexts and interact with virtual characters as part of an experiential approach for learning psychoeducational material, stress management techniques, and emotional coping strategies believed to enhance stress resilience. This article describes the development and evaluation of the virtual Iraq/Afghanistan exposure therapy system and then details its current transition into the STRIVE tool for predeployment stress resilience training.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Wang, Zhiyang; Lee, Jina; Marsella, Stacy C.
Multi-party, multi-role comprehensive listening behavior Journal Article
In: Journal of Autonomous Agents and Multi-Agent Systems, 2013.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{wang_multi-party_2013,
title = {Multi-party, multi-role comprehensive listening behavior},
author = {Zhiyang Wang and Jina Lee and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Multi-party%20multi-role%20comprehensive%20listening%20behavior.pdf},
year = {2013},
date = {2013-01-01},
journal = {Journal of Autonomous Agents and Multi-Agent Systems},
abstract = {Realizing effective listening behavior in virtual humans has become a key area of research, especially as research has sought to realize more complex social scenarios involving multiple participants and bystanders. A human listener’s nonverbal behavior is conditioned by a variety of factors, from the current speaker’s behavior to the listener’s role, desire to participate in the conversation, and unfolding comprehension of the speaker. Similarly, we seek to create virtual humans able to provide feedback based on their participatory goals and their unfolding understanding of, and reaction to, the relevance of what the speaker is saying as the speaker speaks. Based on a survey of existing psychological literature as well as recent technological advances in recognition and partial understanding of natural language, we describe a model of how to integrate these factors into a virtual human that behaves consistently with these goals. We then discuss how the model is implemented into a virtual human architecture and present an evaluation of behaviors used in the model.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Bousmalis, Konstantinos; Zafeiriou, Stefanos; Morency, Louis-Philippe; Pantic, Maja
Infinite Hidden Conditional Random Fields for Human Behavior Analysis Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, vol. 24, no. 1, pp. 170–177, 2013.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{bousmalis_infinite_2013,
title = {Infinite Hidden Conditional Random Fields for Human Behavior Analysis},
author = {Konstantinos Bousmalis and Stefanos Zafeiriou and Louis-Philippe Morency and Maja Pantic},
url = {http://ict.usc.edu/pubs/Infinite%20Hidden%20Conditional%20Random%20Fields%20for%20Human%20Behavior%20Analysis.pdf},
year = {2013},
date = {2013-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
volume = {24},
number = {1},
pages = {170–177},
abstract = {Hidden conditional random fields (HCRFs) are discriminative latent variable models that have been shown to successfully learn the hidden structure of a given classification problem (provided an appropriate validation of the number of hidden states). In this brief, we present the infinite HCRF (iHCRF), which is a nonparametric model based on hierarchical Dirichlet processes and is capable of automatically learning the optimal number of hidden states for a classification task. We show how we learn the model hyperparameters with an effective Markov-chain Monte Carlo sampling technique, and we explain the process that underlies our iHCRF model with the Restaurant Franchise Rating Agencies analogy. We show that the iHCRF is able to converge to a correct number of represented hidden states, and outperforms the best finite HCRFs—chosen via cross-validation—for the difficult tasks of recognizing instances of agreement, disagreement, and pain. Moreover, the iHCRF manages to achieve this performance in significantly less total training, validation, and testing time.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Artstein, Ron; Forbell, Eric; Foutz, Susan; Lane, H. Chad; Lange, Belinda; Morie, Jacquelyn; Noren, Dan; Rizzo, Albert; Traum, David
Virtual Humans for Learning Journal Article
In: AI magazine; Special issue on Intelligent Learning Technologies, vol. 34, no. 4, pp. 13–30, 2013.
Abstract | Links | BibTeX | Tags: Learning Sciences, MedVR, UARC, Virtual Humans
@article{swartout_virtual_2013,
title = {Virtual Humans for Learning},
author = {William Swartout and Ron Artstein and Eric Forbell and Susan Foutz and H. Chad Lane and Belinda Lange and Jacquelyn Morie and Dan Noren and Albert Rizzo and David Traum},
url = {http://www.aaai.org/ojs/index.php/aimagazine/article/view/2487},
doi = {10.1609/aimag.v34i4.2487},
year = {2013},
date = {2013-01-01},
journal = {AI magazine; Special issue on Intelligent Learning Technologies},
volume = {34},
number = {4},
pages = {13–30},
abstract = {Virtual humans are computer-generated characters designed to look and behave like real people. Studies have shown that virtual humans can mimic many of the social effects that one finds in human-human interactions, such as creating rapport, and people respond to virtual humans in ways that are similar to how they respond to real people. We believe that virtual humans represent a new metaphor for interacting with computers, one in which working with a computer becomes much like interacting with a person, and this can bring social elements to the interaction that are not easily supported with conventional interfaces. We present two systems that embody these ideas. The first, the Twins, are virtual docents in the Museum of Science, Boston, designed to engage visitors and raise their awareness and knowledge of science. The second, SimCoach, uses an empathetic virtual human to provide veterans and their families with information about PTSD and depression.},
keywords = {Learning Sciences, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2012
Morbini, Fabrizio; Audhkhasi, Kartik; Artstein, Ron; Segbroeck, Maarten Van; Sagae, Kenji; Georgiou, Panayiotis G.; Traum, David; Narayanan, Shri
A Reranking Approach for Recognition and Classification of Speech Input in Conversational Dialogue Systems Proceedings Article
In: IEEE Workshop on Spoken Language Technology, Miami, Florida, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{morbini_reranking_2012,
title = {A Reranking Approach for Recognition and Classification of Speech Input in Conversational Dialogue Systems},
author = {Fabrizio Morbini and Kartik Audhkhasi and Ron Artstein and Maarten Van Segbroeck and Kenji Sagae and Panayiotis G. Georgiou and David Traum and Shri Narayanan},
url = {http://ict.usc.edu/pubs/A%20Reranking%20Approach%20for%20Recognition%20and%20Classification%20of%20Speech%20Input%20in%20Conversational%20Dialogue%20Systems.pdf},
year = {2012},
date = {2012-12-01},
booktitle = {IEEE Workshop on Spoken Language Technology},
address = {Miami, Florida},
abstract = {We address the challenge of interpreting spoken input in a conversational dialogue system with an approach that aims to exploit the close relationship between the tasks of speech recognition and language understanding through joint modeling of these two tasks. Instead of using a standard pipeline approach where the output of a speech recognizer is the input of a language understanding module, we merge multiple speech recognition and utterance classification hypotheses into one list to be processed by a joint reranking model. We obtain substantially improved performance in language understanding in experiments with thousands of user utterances collected from a deployed spoken dialogue system.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas; Sagae, Kenji; John, Bruce Sheffield; Rizzo, Albert
Designing Useful Virtual Standardized Patient Encounters Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), Orlando, FL, 2012.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@inproceedings{talbot_designing_2012,
title = {Designing Useful Virtual Standardized Patient Encounters},
author = {Thomas Talbot and Kenji Sagae and Bruce Sheffield John and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Designing%20Useful%20Virtual%20Standardized%20Patient%20Encounters.pdf},
year = {2012},
date = {2012-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {Developers and educators have explored many different ways to create “Virtual Patients” as a method to simulate a patient encounter. Some of these attempts have been educationally useful, yet no approach taken to date has satisfactorily replicated the Patient-Doctor encounter in a way that can be generalized, nor have the best developments to date been readily author-able by regular medical educators. The best simulator to date is the human standardized patient actor, which has considerable disadvantages. The manner in which a virtual standardized patient can be designed requires a breakdown of the clinical encounter into components and a strategic approach to simulating each phase. These components are compared to find the optimal approach for each part of the medical encounter. The paper proposes a blend of an artificially intelligent statistical matching dialogue system with multiple choice state machine-based sub-conversations as a way in which one may richly simulate the interview and counseling phases of the clinical encounter. Also elucidated are the steps necessary for educator author-ability and approaches that will extract rich, objective assessment data. If such integration proves to be successful, the result will be a rich conversational clinical simulation that closely approximates Patient-Doctor encounters.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Demski, Abram
Logical Prior Probability Proceedings Article
In: Conference on Artificial General Intelligence, Oxford, UK, 2012.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@inproceedings{demski_logical_2012,
title = {Logical Prior Probability},
author = {Abram Demski},
url = {http://ict.usc.edu/pubs/Logical%20Prior%20Probability.pdf},
year = {2012},
date = {2012-12-01},
booktitle = {Conference on Artificial General Intelligence},
address = {Oxford, UK},
abstract = {A Bayesian prior over first-order theories is defined. It is shown that the prior can be approximated, and the relationship to previously studied priors is examined.},
keywords = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hays, Matthew; Campbell, Julia; Trimmer, Matthew; Poore, Joshua; Webb, Andrea; Stark, Charles; King, Teresa
Can Role-Play with Virtual Humans Teach Interpersonal Skills? Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2012.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{hays_can_2012,
title = {Can Role-Play with Virtual Humans Teach Interpersonal Skills?},
author = {Matthew Hays and Julia Campbell and Matthew Trimmer and Joshua Poore and Andrea Webb and Charles Stark and Teresa King},
url = {http://ict.usc.edu/pubs/Can%20Role-Play%20with%20Virtual%20Humans%20Teach%20Interpersonal%20Skills.pdf},
year = {2012},
date = {2012-12-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {Interpersonal and counseling skills are essential to Officers’ ability to lead (Headquarters, Department of the Army, 2006, 2008, 2011). We developed a cognitive framework and an immersive training experience—the Immersive Naval Officer Training System (INOTS)—to help Officers learn and practice these skills (Campbell et al., 2011). INOTS includes up-front instruction about the framework, vignette-based demonstrations of its application, a roleplay session with a virtual human to practice the skills, and a guided after-action review (AAR). A critical component of any training effort is the assessment process; we conducted both formative and summative assessments of INOTS. Our formative assessments comprised surveys as well as physiological sensor equipment. Data from these instruments were used to evaluate how engaging the virtual-human based practice session was. We compared these data to a gold standard: a practice session with a live human role-player. We found that the trainees took the virtual-human practice session seriously—and that interacting with the virtual human was just as engaging as was interacting with the live human role-player. Our summative assessments comprised surveys as well as behavioral measures. We used these data to evaluate learning produced by the INOTS experience. In a pretest-posttest design, we found reliable gains in the participants' understanding of and ability to apply interpersonal skills, although the limited practice with the virtual human did not provide additional immediate benefits. This paper details the development of our assessment approaches, the experimental procedures that yielded the data, and our results. We also discuss the implications of our efforts for the future design of assessments and training systems.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2012, 2012.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@techreport{graham_measurement-based_2012,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2012.pdf},
year = {2012},
date = {2012-11-01},
number = {ICT TR 01 2012},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a technique for generating microstructure-level facial geometry by augmenting a mesostructure-level facial scan with detail synthesized from a set of exemplar skin patches scanned at much higher resolution. We use constrained texture synthesis based on image analogies to increase the resolution of the facial scan in a way that is consistent with the scanned mesostructure. We digitize the exemplar patches with a polarization-based computational illumination technique which considers specular reflection and single scattering. The recorded microstructure patches can be used to synthesize full-facial microstructure detail for either the same subject or a different subject. We show that the technique allows for greater realism in facial renderings, including more accurate reproduction of skin’s specular roughness and anisotropic reflection effects.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {techreport}
}
Ozkan, Derya; Morency, Louis-Philippe
Latent Mixture of Discriminative Experts Journal Article
In: IEEE Transactions on Multimedia, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{ozkan_latent_2012,
title = {Latent Mixture of Discriminative Experts},
author = {Derya Ozkan and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Latent%20Mixture%20of%20Discriminative%20Experts.pdf},
year = {2012},
date = {2012-11-01},
journal = {IEEE Transactions on Multimedia},
abstract = {In this paper, we introduce a new model called Latent Mixture of Discriminative Experts (LMDE), which can automatically learn the temporal relationship between different modalities. Since we train separate experts for each modality, LMDE is capable of improving the prediction performance even with a limited amount of data. For model interpretation, we present a sparse feature ranking algorithm that exploits L1 regularization. An empirical evaluation is provided on the task of listener backchannel prediction (i.e., head nod). We introduce a new error evaluation metric called User-adaptive Prediction Accuracy that takes into account the difference in people’s backchannel responses. Our results confirm the importance of combining five types of multimodal features: lexical, syntactic structure, part-of-speech, visual, and prosody. The Latent Mixture of Discriminative Experts model outperforms previous approaches.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew W.; Huang, Yazhou; Kallmann, Marcelo; Shapiro, Ari
An Analysis of Motion Blending Techniques Proceedings Article
In: International Conference on Motion in Games, Rennes, France, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{feng_analysis_2012,
title = {An Analysis of Motion Blending Techniques},
author = {Andrew W. Feng and Yazhou Huang and Marcelo Kallmann and Ari Shapiro},
url = {http://ict.usc.edu/pubs/An%20Analysis%20of%20Motion%20Blending%20Techniques.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {International Conference on Motion in Games},
address = {Rennes, France},
abstract = {Motion blending is a widely used technique for character animation. The main idea is to blend similar motion examples according to blending weights, in order to synthesize new motions parameterizing high level characteristics of interest. We present in this paper an in-depth analysis and comparison of four motion blending techniques: Barycentric interpolation, Radial Basis Function, K-Nearest Neighbors and Inverse Blending optimization. Comparison metrics were designed to measure the performance across different motion categories on criteria including smoothness, parametric error and computation time. We have implemented each method in our character animation platform SmartBody and we present several visualization renderings that provide a window for gleaning insights into the underlying pros and cons of each method in an intuitive way.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
The Light Stages and Their Applications to Photoreal Digital Actors Proceedings Article
In: SIGGRAPH Asia, Singapore, 2012.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{debevec_light_2012,
title = {The Light Stages and Their Applications to Photoreal Digital Actors},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/The%20Light%20Stages%20and%20Their%20Applications%20to%20Photoreal%20Digital%20Actors.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {SIGGRAPH Asia},
address = {Singapore},
abstract = {The Light Stage systems built at UC Berkeley and USC ICT have enabled a variety of facial scanning and reflectance measurement techniques that have been explored in several research papers and used in various commercial applications. This short paper presents the evolutionary history of the Light Stage Systems and some of the techniques and applications they have enabled.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew W.; Huang, Yazhou; Xu, Yuyu; Shapiro, Ari
Automating the Transfer of a Generic Set of Behaviors onto a Virtual Character Proceedings Article
In: International Conference on Motion in Games, Rennes, France, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{feng_automating_2012,
title = {Automating the Transfer of a Generic Set of Behaviors onto a Virtual Character},
author = {Andrew W. Feng and Yazhou Huang and Yuyu Xu and Ari Shapiro},
url = {http://ict.usc.edu/pubs/Automating%20the%20Transfer%20of%20a%20Generic%20Set%20of%20Behaviors%20onto%20a%20Virtual%20Character.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {International Conference on Motion in Games},
address = {Rennes, France},
abstract = {Humanoid 3D models can be easily acquired through various sources, including online. The use of such models within a game or simulation environment requires human input and intervention in order to associate such a model with a relevant set of motions and control mechanisms. In this paper, we demonstrate a pipeline where humanoid 3D models can be incorporated within seconds into an animation system, and infused with a wide range of capabilities, such as locomotion, object manipulation, gazing, speech synthesis and lip syncing. We offer a set of heuristics that can associate arbitrary joint names with canonical ones, and describe a fast retargeting algorithm that enables us to instill a set of behaviors onto an arbitrary humanoid skeleton. We believe that such a system will vastly increase the use of 3D interactive characters due to the ease with which new models can be animated.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Non-cooperative and Deceptive Virtual Agents Journal Article
In: IEEE Intelligent Systems: Trends and Controversies: Computational Deception and Noncooperation, vol. 27, no. 6, pp. 66–69, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{traum_non-cooperative_2012,
title = {Non-cooperative and Deceptive Virtual Agents},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Non-cooperative%20and%20Deceptive%20Virtual%20Agents.pdf},
year = {2012},
date = {2012-11-01},
journal = {IEEE Intelligent Systems: Trends and Controversies: Computational Deception and Noncooperation},
volume = {27},
number = {6},
pages = {66–69},
abstract = {Virtual agents that engage in dialogue with people can be used for a variety of purposes, including as service and information providers, tutors, confederates in psychology experiments, and role players in social training exercises. It seems reasonable that agents acting as service and information providers, and arguably as tutors, would be truthful and cooperative. For other applications, however, such as roleplaying opponents, competitors, or more neutral characters in a training exercise, total honesty and cooperativeness would defeat the purpose of the exercise and fail to train people in coping with deception. The Institute for Creative Technologies at the University of Southern California has created several roleplaying characters, using different models of dialogue and uncooperative and deceptive behavior. This article briefly describes these models, as used in two different genres of dialogue agent: interviewing and negotiation. The models are presented in order from least to most sophisticated reasoning about deception. Most accounts of pragmatic reasoning in dialogue use versions of Grice’s cooperative principles and maxims to derive utterance meanings (which might be indirect in their expression). However, these maxims, such as “be truthful,” don’t cover situations in which conversationalists are deceptive or otherwise uncooperative, even though much human dialogue contains aspects of uncooperative behavior. Gricean accounts alone don’t adequately cover cases in which conversational participants aren’t cooperative—for example, why do they ever answer at all? The notion of discourse obligations differentiates the obligation to respond from the mechanism of response generation, which could be either cooperative, neutral, or deceptive.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Forbell, Eric; Lange, Belinda; Buckwalter, John Galen; Williams, Josh; Sagae, Kenji; Traum, David
SimCoach: An Online Intelligent Virtual Agent System for Breaking Down Barriers to Care for Service Members and Veterans Book Section
In: Healing War Trauma: A Handbook of Creative Approaches, pp. 238–250, Routledge, 2012.
Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@incollection{rizzo_simcoach_2012,
title = {SimCoach: An Online Intelligent Virtual Agent System for Breaking Down Barriers to Care for Service Members and Veterans},
author = {Albert Rizzo and Eric Forbell and Belinda Lange and John Galen Buckwalter and Josh Williams and Kenji Sagae and David Traum},
url = {http://ict.usc.edu/pubs/SimCoach-%20An%20Online%20Intelligent%20Virtual%20Agent%20System%20for%20Breaking%20Down%20Barriers%20to%20Care%20for%20Service%20Members%20and%20Veterans.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {Healing War Trauma: A Handbook of Creative Approaches},
pages = {238–250},
publisher = {Routledge},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Morbini, Fabrizio; DeVault, David; Sagae, Kenji; Gerten, Jillian; Nazarian, Angela; Traum, David
FLoReS: A Forward Looking, Reward Seeking, Dialogue Manager Proceedings Article
In: 4th International Workshop on Spoken Dialog Systems, Paris, France, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{morbini_flores_2012,
title = {FLoReS: A Forward Looking, Reward Seeking, Dialogue Manager},
author = {Fabrizio Morbini and David DeVault and Kenji Sagae and Jillian Gerten and Angela Nazarian and David Traum},
url = {http://ict.usc.edu/pubs/FLoReS-%20A%20Forward%20Looking,%20Reward%20Seeking,%20Dialogue%20Manager.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {4th International Workshop on Spoken Dialog Systems},
address = {Paris, France},
abstract = {We present FLoReS, a new information-state based dialogue manager, making use of forward inference, local dialogue structure, and plan operators representing sub-dialogue structure. The aim is to support both advanced, flexible, mixed initiative interaction and efficient policy creation by domain experts. The dialogue manager has been used for two characters in the SimCoach project, and is currently being used in several related projects. We present the design of the dialogue manager and preliminary comparative evaluation with a previous system that uses a more conventional state chart dialogue manager.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Chance, Eric; Haynes, Kip; Rajpurohit, Dinesh
Embodied Conversational Agent Avatars in Virtual Worlds: Making Today's Immersive Environments More Responsive to Participants Book Section
In: Believable Bots: Can Computers Play Like People?, pp. 99–118, Springer Berlin Heidelberg, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Worlds
@incollection{morie_embodied_2012,
title = {Embodied Conversational Agent Avatars in Virtual Worlds: Making Today's Immersive Environments More Responsive to Participants},
author = {Jacquelyn Morie and Eric Chance and Kip Haynes and Dinesh Rajpurohit},
url = {http://link.springer.com/chapter/10.1007%2F978-3-642-32323-2_4},
year = {2012},
date = {2012-10-01},
booktitle = {Believable Bots: Can Computers Play Like People?},
pages = {99–118},
publisher = {Springer Berlin Heidelberg},
abstract = {Intelligent agents in the form of avatars in networked virtual worlds (VWs) are a new form of embodied conversational agent (ECA). They are still a topic of active research, but promise soon to rival the sophistication of virtual human agents developed on stand-alone platforms over the last decade. Such agents in today's VWs grew out of two lines of historical research: Virtual Reality and Artificial Intelligence. Their merger forms the basis for today's persistent 3D worlds occupied by intelligent characters serving a wide range of purposes. We believe ECA avatars will help to enable VWs to achieve a higher level of meaningful interaction by providing increased engagement and responsiveness within environments where people will interact with and even develop relationships with them.},
keywords = {UARC, Virtual Worlds},
pubstate = {published},
tppubtype = {incollection}
}
Wienberg, Christopher; Gordon, Andrew S.
PhotoFall: Discovering Weblog Stories Through Photographs Proceedings Article
In: ACM Conference on Information and Knowledge Management, Maui, Hawaii, 2012.
Abstract | Links | BibTeX | Tags: The Narrative Group, UARC
@inproceedings{wienberg_photofall_2012,
title = {PhotoFall: Discovering Weblog Stories Through Photographs},
author = {Christopher Wienberg and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/PhotoFall-%20Discovering%20Weblog%20Stories%20Through%20Photographs.PDF},
year = {2012},
date = {2012-10-01},
booktitle = {ACM Conference on Information and Knowledge Management},
address = {Maui, Hawaii},
abstract = {An effective means of retrieving relevant photographs from the web is to search for terms that would likely appear in the surrounding text in multimedia documents. In this paper, we investigate the complementary search strategy, where relevant multimedia documents are retrieved using the photographs they contain. We concentrate our efforts on the retrieval of large numbers of personal stories posted to Internet weblogs that are relevant to a particular search topic. Photographs are often included in posts of this sort, typically taken by the author during the course of the narrated events of the story. We describe a new story search tool, PhotoFall, which allows users to quickly find stories related to their topic of interest by judging the relevance of the photographs extracted from top search results. We evaluate the accuracy of relevance judgments made using this interface, and discuss the implications of the results for improving topic-based searches of multimedia content.},
keywords = {The Narrative Group, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.
Extending Mental Imagery in Sigma Proceedings Article
In: Conference on Artificial General Intelligence, Oxford, UK, 2012.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@inproceedings{rosenbloom_extending_2012,
title = {Extending Mental Imagery in Sigma},
author = {Paul S. Rosenbloom},
url = {http://ict.usc.edu/pubs/Extending%20Mental%20Imagery%20in%20Sigma.pdf},
year = {2012},
date = {2012-10-01},
booktitle = {Conference on Artificial General Intelligence},
address = {Oxford, UK},
abstract = {This article presents new results on implementing mental imagery within the Sigma cognitive architecture. Rather than amounting to a distinct module, mental imagery is based on the same primitive, hybrid mixed, architectural mechanisms as Sigma's other cognitive capabilities. The work here demonstrates the creation and modification of compound images, the transformation of individual objects within such images, and the extraction of derived information from these compositions.},
keywords = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Song, Yale; Morency, Louis-Philippe; Davis, Randall
Multimodal Human Behavior Analysis: Learning Correlation and Interaction Across Modalities Proceedings Article
In: ACM International Conference on Multimodal Interaction (ICMI), Santa Monica, CA, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{song_multimodal_2012,
title = {Multimodal Human Behavior Analysis: Learning Correlation and Interaction Across Modalities},
author = {Yale Song and Louis-Philippe Morency and Randall Davis},
url = {http://ict.usc.edu/pubs/Multimodal%20Human%20Behavior%20Analysis-%20Learning%20Correlation%20and%20Interaction%20Across%20Modalities.pdf},
year = {2012},
date = {2012-10-01},
booktitle = {ACM International Conference on Multimodal Interaction (ICMI)},
address = {Santa Monica, CA},
abstract = {Multimodal human behavior analysis is a challenging task due to the presence of complex nonlinear correlations and interactions across modalities. We present a novel approach to this problem based on Kernel Canonical Correlation Analysis (KCCA) and Multi-view Hidden Conditional Random Fields (MV-HCRF). Our approach uses a nonlinear kernel to map multimodal data to a high-dimensional feature space and finds a new projection of the data that maximizes the correlation across modalities. We use a multi-chain structured graphical model with disjoint sets of latent variables, one set per modality, to jointly learn both view-shared and view-specific substructures of the projected data, capturing interaction across modalities explicitly. We evaluate our approach on a task of agreement and disagreement recognition from nonverbal audio-visual cues using the Canal 9 dataset. Experimental results show that KCCA makes capturing nonlinear hidden dynamics easier and MV-HCRF helps learning interaction across modalities.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Mohammadi, Gelareh; Artstein, Ron; Morency, Louis-Philippe
Crowdsourcing Micro-Level Multimedia Annotations: The Challenges of Evaluation and Interface Proceedings Article
In: International ACM Workshop on Crowdsourcing for Multimedia (CrowdMM), Nara, Japan, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{park_crowdsourcing_2012,
title = {Crowdsourcing Micro-Level Multimedia Annotations: The Challenges of Evaluation and Interface},
author = {Sunghyun Park and Gelareh Mohammadi and Ron Artstein and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Crowdsourcing%20Micro-Level%20Multimedia%20Annotations-%20The%20Challenges%20of%20Elevation%20and%20Interface.pdf},
year = {2012},
date = {2012-10-01},
booktitle = {International ACM Workshop on Crowdsourcing for Multimedia (CrowdMM)},
address = {Nara, Japan},
abstract = {This paper presents a new evaluation procedure and tool for crowdsourcing micro-level multimedia annotations and shows that such annotations can achieve a quality comparable to that of expert annotations. We propose a new evaluation procedure, called MM-Eval (Micro-level Multimedia Evaluation), which compares fine time-aligned annotations using Krippendorff’s alpha metric, and introduce two new metrics to evaluate the types of disagreement between coders. We also introduce OCTAB (Online Crowdsourcing Tool for Annotations of Behaviors), a web-based annotation tool that allows precise and convenient multimedia behavior annotations, directly from the Amazon Mechanical Turk interface. With an experiment using the above tool and evaluation procedure, we show that a majority vote among annotations from 3 crowdsource workers leads to a quality comparable to that of local expert annotations.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Weibel, Nadir; Oviatt, Sharon; Morency, Louis-Philippe
Multimodal Prediction of Expertise and Leadership in Learning Groups Proceedings Article
In: ACM International Conference on Multimodal Interaction (ICMI), Santa Monica, CA, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{scherer_multimodal_2012,
title = {Multimodal Prediction of Expertise and Leadership in Learning Groups},
author = {Stefan Scherer and Nadir Weibel and Sharon Oviatt and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Multimodal%20Prediction%20of%20Expertise%20and%20Leadership%20in%20Learning%20Groups.pdf},
year = {2012},
date = {2012-10-01},
booktitle = {ACM International Conference on Multimodal Interaction (ICMI)},
address = {Santa Monica, CA},
abstract = {In this study, we investigate low level predictors from audio and writing modalities for the separation and identification of socially dominant leaders and experts within a study group. We use a multimodal dataset of situated computer assisted group learning tasks: Groups of three high-school students solve a number of mathematical problems in two separate sessions. In order to automatically identify the socially dominant student and expert in the group we analyze a number of prosodic and voice quality features as well as writing-based features. In this preliminary study we identify a number of promising acoustic and writing predictors for the disambiguation of leaders, experts and other students. We believe that this exploratory study reveals key opportunities for future analysis of multimodal learning analytics based on a combination of audio and writing signals.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.
Deconstructing Reinforcement Learning in Sigma Proceedings Article
In: Conference on Artificial General Intelligence, Oxford, UK, 2012.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@inproceedings{rosenbloom_deconstructing_2012,
title = {Deconstructing Reinforcement Learning in Sigma},
author = {Paul S. Rosenbloom},
url = {http://ict.usc.edu/pubs/Deconstructing%20Reinforcement%20Learning%20in%20Sigma.pdf},
year = {2012},
date = {2012-10-01},
booktitle = {Conference on Artificial General Intelligence},
address = {Oxford, UK},
abstract = {This article describes the development of reinforcement learning within the Sigma graphical cognitive architecture. Reinforcement learning has been deconstructed in terms of the interactions among more basic mechanisms and knowledge in Sigma, making it a derived capability rather than a de novo mechanism. Basic reinforcement learning – both model-based and model-free – is demonstrated, along with the intertwining of model learning.},
keywords = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Gratch, Jonathan; Morency, Louis-Philippe
I Already Know Your Answer: Using Nonverbal Behaviors to Predict Immediate Outcomes in a Dyadic Negotiation Proceedings Article
In: 14th ACM International Conference on Multimodal Interaction (ICMI), Santa Monica, CA, 2012.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{park_i_2012,
title = {I Already Know Your Answer: Using Nonverbal Behaviors to Predict Immediate Outcomes in a Dyadic Negotiation},
author = {Sunghyun Park and Jonathan Gratch and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/I%20Already%20Know%20Your%20Answer-%20Using%20Nonverbal%20Behaviors%20to%20Predict%20Immediate%20Outcomes%20in%20a%20Dyadic%20Negotiation.pdf},
year = {2012},
date = {2012-10-01},
booktitle = {14th ACM International Conference on Multimodal Interaction (ICMI)},
address = {Santa Monica, CA},
abstract = {Be it in our workplace or with our family or friends, negotiation comprises a fundamental fabric of our everyday life, and it is apparent that a system that can automatically predict negotiation outcomes will have substantial implications. In this paper, we focus on finding nonverbal behaviors that are predictive of immediate outcomes (acceptances or rejections of proposals) in a dyadic negotiation. Looking at the nonverbal behaviors of the respondent alone would be inadequate since ample predictive information could also reside in the behaviors of the proposer, as well as the past history between the two parties. With this intuition in mind, we show that a more accurate prediction can be achieved by considering all the three sources (multimodal) of information together. We evaluate our approach on a face-to-face negotiation dataset consisting of 42 dyadic interactions and show that integrating all three sources of information outperforms each individual predictor.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: ACM SIGGRAPH 2012 Talks (SIGGRAPH '12), Los Angeles, CA, 2012.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{graham_measurement-based_2012-1,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/A%20Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks (SIGGRAPH '12)},
address = {Los Angeles, CA},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision, showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron-scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10-micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
0000
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, UARC, Virtual Humans
@article{gervits_classication-based_nodate,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {ARL, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}