Publications
Inoue, Naoya; Gordon, Andrew S.
A Scalable Weighted Max-SAT Implementation of Propositional Etcetera Abduction Proceedings Article
In: Proceedings of the 30th International Conference of the Florida AI Society (FLAIRS-30), AAAI Press, Marco Island, Florida, 2017.
@inproceedings{inoue_scalable_2017,
title = {A Scalable Weighted Max-SAT Implementation of Propositional Etcetera Abduction},
author = {Naoya Inoue and Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/publications/FLAIRS17.PDF},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 30th International Conference of the Florida AI Society (FLAIRS-30)},
publisher = {AAAI Press},
address = {Marco Island, Florida},
abstract = {Recent advances in technology for abductive reasoning, or inference to the best explanation, encourage the application of abduction to real-life commonsense reasoning problems. This paper describes Etcetera Abduction, a new implementation of logical abduction that is both grounded in probability theory and optimized using contemporary linear programming solvers. We present a Weighted Max-SAT formulation of Etcetera Abduction, which allows us to exploit highly advanced technologies developed in the field of SAT and Operations Research. Our experiments demonstrate the scalability of our proposal on a large-scale synthetic benchmark that contains up to ten thousand axioms, using one of the state-of-the-art mathematical optimizers developed in these fields. This is the first work to evaluate a SAT-based approach to abductive reasoning at this scale. The inference engine we developed has been made publicly available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
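The released engine is linked above; purely as an illustration of the objective it optimizes (this is not the authors' code, and the tiny axiom set with its priors is invented), the sketch below scores candidate explanations by the joint prior of their assumed "etcetera" literals. Minimizing the sum of negative log priors over the assumed axioms is exactly the weighted Max-SAT objective that lets an off-the-shelf solver replace this brute-force search at the scale of ten thousand axioms.

import itertools, math

# Hypothetical knowledge base: each axiom is (antecedents, consequent, prior),
# where `prior` is the probability attached to the axiom's etcetera literal.
AXIOMS = [
    ((), "rain", 0.1),             # etc: it rained
    ((), "sprinkler", 0.2),        # etc: the sprinkler was on
    (("rain",), "wet_grass", 0.9),
    (("sprinkler",), "wet_grass", 0.8),
]

def entails(assumed, goal):
    """Forward-chain over the assumed axioms to see if `goal` is derivable."""
    facts = set()
    changed = True
    while changed:
        changed = False
        for ante, cons, _ in assumed:
            if cons not in facts and all(a in facts for a in ante):
                facts.add(cons)
                changed = True
    return goal in facts

def best_explanation(observation):
    """Brute-force stand-in for the Max-SAT search: try every axiom subset,
    keep one that entails the observation with maximal joint prior
    (equivalently, minimal sum of -log priors, the Max-SAT objective)."""
    best, best_cost = None, float("inf")
    for r in range(1, len(AXIOMS) + 1):
        for subset in itertools.combinations(AXIOMS, r):
            if entails(subset, observation):
                cost = sum(-math.log(p) for _, _, p in subset)
                if cost < best_cost:
                    best, best_cost = subset, cost
    return best

for ante, cons, p in best_explanation("wet_grass"):
    print(ante, "->", cons, "prior", p)   # the sprinkler explanation wins here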
Rosenbloom, Paul S.; Laird, John E.; Lebiere, Christian
Précis of ‘A Standard Model of the Mind’ Proceedings Article
In: Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems, Cognitive Systems Foundation, Troy, New York, 2017.
@inproceedings{rosenbloom_precis_2017,
title = {Précis of ‘A Standard Model of the Mind’},
author = {Paul S. Rosenbloom and John E. Laird and Christian Lebiere},
url = {http://cs.usc.edu/~rosenblo/Pubs/SM%20ACS%202017%20D.pdf},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems},
publisher = {Cognitive Systems Foundation},
address = {Troy, New York},
abstract = {A standard model captures a community consensus over a coherent region of science, such as particle physics. Here we summarize the key points from a longer article (Laird, Lebiere & Rosenbloom, 2017) that proposes developing such a model for human-like minds.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.; Auerbach, Daniel; Mehta, Tirth R.; Hartholt, Arno
Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress) Proceedings Article
In: Proceedings of the GIFTSym5, pp. 23–35, ARL, Orlando, Florida, 2017.
@inproceedings{nye_building_2017,
title = {Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress)},
author = {Benjamin D. Nye and Daniel Auerbach and Tirth R. Mehta and Arno Hartholt},
url = {https://books.google.com/books?id=PwMtDwAAQBAJ&printsec=copyright&source=gbs_pub_info_r#v=onepage&q&f=false},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the GIFTSym5},
pages = {23–35},
publisher = {ARL},
address = {Orlando, Florida},
abstract = {As intelligent tutoring systems (ITS) increasingly need to interoperate and co-exist, emerging systems have transitioned toward service-oriented designs to enable modularity and composability of tutoring components made and/or maintained by different research and development groups. However, as a research community, we have still not reached a point where it is trivial for a new service to be added into a system like the Generalized Intelligent Framework for Tutoring (GIFT; Sottilare, Goldberg, Brawner, & Holden, 2012). In an early paper considering this issue with respect to the GIFT architecture (Nye & Morrison, 2013), we proposed addressing this issue by building toward a lightweight multi-agent architecture where certain services act as autonomous agents: “a system situated within and a part of an environment that senses that environment and acts on it, over time, in pursuit of its own agenda and so as to affect what it senses in the future” (Franklin & Graesser, 1997; p. 25). In our work in progress described here, we discuss how we are approaching the opportunity to build such capabilities into GIFT. The high level goals of our work are targeting two core goals for GIFT: A) to be a lightweight framework that will expand access to and use of ITS and B) to help GIFT to increase the intelligence and effectiveness of its services based on data over time. We are currently targeting the first goal, which will underpin the second goal. However, what does it mean to be a lightweight framework? In this context, a “lightweight framework” is framed as minimizing the following criteria: (1) hardware requirements, (2) software expertise to design services, (3) software expertise to use existing services, (4) software expertise to stand up the message-passing layer between agents, and (5) a minimal working message ontology (Nye & Morrison, 2013). Since our original paper four years ago, GIFT has made significant strides in reducing barriers related to hardware by building a cloud-based version and software expertise to use GIFT services through authoring tools. It has also developed a growing ontology of messages (e.g., https://gifttutoring.org/projects/gift/wiki/Interface_Control_Document_2016-1). With that said, despite now-extensive documentation, designing new services for GIFT is still not trivial and strong expertise is required to pass messages between GIFT modules and agents (either internal or external). To address these issues, the Building a Backbone project is working toward agent-oriented designs that build on GIFT's existing service-oriented framework. By moving from services toward agents, modules will be able to act more autonomously, enabling capabilities such as plug-and-play, hot-swapping, and selecting between multiple services providing the same capabilities. These new capabilities are intended to reduce barriers to building new GIFT-compatible services and also to integrating GIFT with other service-oriented ecosystems. The first steps toward these capabilities are an ontology mapping service and an initial integration that combines GIFT, the Virtual Human Toolkit core framework for agents, and the SuperGLU framework for adding agent-oriented capabilities for coordinating services. This paper reports on work to date, with an emphasis on target capabilities, design decisions, challenges, and open research questions for this work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Traum, David; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Listen to My Body: Does Making Friends Help Influence People? Proceedings Article
In: Proceedings of the 30th International Florida Artificial Intelligence Research Society Conference (FLAIRS-30), AAAI, Marco Island, Florida, 2017.
@inproceedings{artstein_listen_2017,
title = {Listen to My Body: Does Making Friends Help Influence People?},
author = {Ron Artstein and David Traum and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {https://aaai.org/ocs/index.php/FLAIRS/FLAIRS17/paper/view/15501/14979},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 30th International Florida Artificial Intelligence Research Society Conference (FLAIRS-30)},
publisher = {AAAI},
address = {Marco Island, Florida},
abstract = {We investigate the effect of relational dialogue on creating rapport and exerting social influence in human-robot conversation, by comparing interactions with and without a relational component, and with different agent types. Human participants interact with two agents – a Nao robot and a virtual human – in four dialogue scenarios: one involving building familiarity, and three involving sharing information and persuasion in item-ranking tasks. Results show that both agents influence human decision-making; people prefer interacting with the robot, feel higher rapport with the robot, and believe the robot has more influence; and that objective influence of the agent on the person is increased by building familiarity, but is not significantly different between the agents.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Solving Interpretation Problems With Etcetera Abduction Proceedings Article
In: Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems, Cognitive Systems Foundation, Troy, New York, 2017.
@inproceedings{gordon_solving_2017,
title = {Solving Interpretation Problems With Etcetera Abduction},
author = {Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/publications/ACS17.PDF},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Fifth Annual Conference on Advances in Cognitive Systems},
publisher = {Cognitive Systems Foundation},
address = {Troy, New York},
abstract = {Among the most challenging problems in Artificial Intelligence are those that require human-like abilities to make sense of ambiguous observations, to interpret events in context given a wealth of life experiences and commonsense knowledge. In the 1990s, Jerry Hobbs and colleagues demonstrated how interpretation problems can be tackled with logical abduction, a combinatorial search for the best set of assumptions that logically entails the observations. Etcetera Abduction is a new approach to ranking assumptions by reifying the uncertainty of knowledge base axioms as etcetera literals, representing conditional and prior probabilities that can be combined through logical unification. In this invited talk, I will highlight some of the features of Etcetera Abduction that make it attractive compared to alternatives, and share my perspective on the role of logic-based reasoning given current trends in machine learning research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; DeVault, David
Towards An Autonomous Agent that Provides Automated Feedback on Students' Negotiation Skills Proceedings Article
In: Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, pp. 410–418, International Foundation for Autonomous Agents and Multiagent Systems, Sao Paulo, Brazil, 2017.
@inproceedings{johnson_towards_2017,
title = {Towards An Autonomous Agent that Provides Automated Feedback on Students' Negotiation Skills},
author = {Emmanuel Johnson and Jonathan Gratch and David DeVault},
url = {http://dl.acm.org/citation.cfm?id=3091187},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems},
pages = {410–418},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Sao Paulo, Brazil},
abstract = {Although negotiation is an integral part of daily life, most people are unskilled negotiators. To improve one's skill set, a range of costly options including self-study guides, courses, and training programs are offered by various companies and educational institutions. For those who can't afford costly training options, virtual role playing agents offer a low-cost alternative. To be effective, these systems must allow students to engage in experiential learning exercises and provide personalized feedback on the learner's performance. In this paper, we show how a number of negotiation principles can be formalized and quantified. We then establish the pedagogical relevance of several automatic metrics, and show that these metrics are significantly correlated with negotiation outcomes in a human-agent negotiation. This illustrates the realism and helps to validate these principles. It also shows the potential of technology being used to quantify feedback that is traditionally provided through more qualitative approaches. The metrics we describe can provide students with personalized feedback on the errors they make in a negotiation exercise and thereby support guided experiential learning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
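As a toy illustration of what "formalizing and quantifying negotiation principles" can look like (the two-issue payoff tables and function names are hypothetical, not the paper's metrics), here is a joint-value computation plus a Pareto-optimality check of the kind such automated feedback could be built on:

from itertools import product

# Hypothetical 2-issue negotiation: each side assigns points per option.
ISSUES = {"price": ["low", "mid", "high"], "delivery": ["slow", "fast"]}
POINTS_A = {"price": {"low": 0, "mid": 2, "high": 4}, "delivery": {"slow": 3, "fast": 0}}
POINTS_B = {"price": {"low": 4, "mid": 2, "high": 0}, "delivery": {"slow": 0, "fast": 3}}

def value(points, deal):
    return sum(points[issue][opt] for issue, opt in deal.items())

def pareto_optimal(deal):
    """A deal is Pareto-optimal if no other deal makes one side better off
    without making the other side worse off."""
    va, vb = value(POINTS_A, deal), value(POINTS_B, deal)
    for combo in product(*ISSUES.values()):
        other = dict(zip(ISSUES.keys(), combo))
        oa, ob = value(POINTS_A, other), value(POINTS_B, other)
        if (oa >= va and ob >= vb) and (oa > va or ob > vb):
            return False
    return True

deal = {"price": "mid", "delivery": "fast"}
print("joint value:", value(POINTS_A, deal) + value(POINTS_B, deal))
print("pareto optimal:", pareto_optimal(deal))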
Chollet, Mathieu; Scherer, Stefan
Assessing Public Speaking Ability from Thin Slices of Behavior Proceedings Article
In: Proceedings of the 12th IEEE Conference on Automatic Face and Gesture Recognition (FG 2017), pp. 310–316, IEEE, Washington, DC, 2017, ISBN: 978-1-5090-4023-0.
@inproceedings{chollet_assessing_2017,
title = {Assessing Public Speaking Ability from Thin Slices of Behavior},
author = {Mathieu Chollet and Stefan Scherer},
url = {http://ieeexplore.ieee.org/document/7961757/},
doi = {10.1109/FG.2017.45},
isbn = {978-1-5090-4023-0},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the 12th IEEE Conference on Automatic Face and Gesture Recognition (FG 2017)},
pages = {310–316},
publisher = {IEEE},
address = {Washington, DC},
abstract = {An important aspect of public speaking is delivery, which consists of the appropriate use of non-verbal cues to strengthen the message. Recent works have successfully predicted ratings of public speaking delivery aspects using the entire presentations of speakers. However, in other contexts, such as the assessment of personality or the prediction of job interview outcomes, it has been shown that thin slices, brief excerpts of behavior, provide enough information for raters to make accurate predictions. In this paper, we consider the use of thin slices for predicting ratings of public speaking behavior. We use a publicly available corpus of public speaking presentations and obtain ratings of full videos and thin slices. We first study how thin slices ratings are related to full video ratings. Then, we use automatic audio-visual feature extraction methods and machine learning algorithms to create models for predicting public speaking ratings, and evaluate these models for predicting thin slices ratings and full videos ratings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
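A hedged sketch of the prediction setup the abstract describes, with random placeholders standing in for the extracted audio-visual features and the human delivery ratings; ridge regression with cross-validation is one plausible choice of learner for a small rated corpus, not necessarily the one used in the paper:

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 20))      # 50 slices x 20 audio-visual features (placeholder)
y = rng.uniform(1, 7, size=50)     # placeholder 7-point delivery ratings

# Cross-validated fit, comparing thin-slice vs. full-video ratings would
# simply swap in the corresponding y vector.
scores = cross_val_score(Ridge(alpha=1.0), X, y, cv=5, scoring="r2")
print("mean R^2:", scores.mean())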
Feng, Andrew; Rosenberg, Evan Suma; Shapiro, Ari
Just-in-time, viable, 3D avatars from scans Journal Article
In: Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents), vol. 28, no. 3-4, 2017.
@article{feng_just--time_2017,
title = {Just-in-time, viable, 3D avatars from scans},
author = {Andrew Feng and Evan Suma Rosenberg and Ari Shapiro},
url = {http://onlinelibrary.wiley.com/doi/10.1002/cav.1769/epdf},
doi = {10.1002/cav.1769},
year = {2017},
date = {2017-05-01},
journal = {Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents)},
volume = {28},
number = {3-4},
abstract = {We demonstrate a system that can generate a photorealistic, interactive 3-D character from a human subject that is capable of movement, emotion, speech, and gesture in less than 20 min without the need for 3-D artist intervention or specialized technical knowledge, through a near-automatic process. Our method uses mostly commodity or off-the-shelf hardware. We demonstrate the just-in-time use of generating such 3-D models for virtual and augmented reality, games, simulation, and communication. We anticipate that the inexpensive generation of such photorealistic models will be useful in many venues where just-in-time 3-D reconstruction of digital avatars that resemble particular human subjects is necessary.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Stratou, Giota; Morency, Louis-Philippe
MultiSense—Context-Aware Nonverbal Behavior Analysis Framework: A Psychological Distress Use Case Journal Article
In: IEEE Transactions on Affective Computing, vol. 8, no. 2, pp. 190–203, 2017, ISSN: 1949-3045.
@article{stratou_multisensecontext-aware_2017,
title = {MultiSense—Context-Aware Nonverbal Behavior Analysis Framework: A Psychological Distress Use Case},
author = {Giota Stratou and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/document/7579221/},
doi = {10.1109/TAFFC.2016.2614300},
issn = {1949-3045},
year = {2017},
date = {2017-04-01},
journal = {IEEE Transactions on Affective Computing},
volume = {8},
number = {2},
pages = {190–203},
abstract = {During face-to-face interactions, people naturally integrate nonverbal behaviors such as facial expressions and body postures as part of the conversation to infer the communicative intent or emotional state of their interlocutor. The interpretation of these nonverbal behaviors will often be contextualized by interactional cues such as the previous spoken question, the general discussion topic or the physical environment. A critical step in creating computers able to understand or participate in this type of social face-to-face interactions is to develop a computational platform to synchronously recognize nonverbal behaviors as part of the interactional context. In this platform, information for the acoustic and visual modalities should be carefully synchronized and rapidly processed. At the same time, contextual and interactional cues should be remembered and integrated to better interpret nonverbal (and verbal) behaviors. In this article, we introduce a real-time computational framework, MultiSense, which offers flexible and efficient synchronization approaches for context-based nonverbal behavior analysis. MultiSense is designed to utilize interactional cues from both interlocutors (e.g., from the computer and the human participant) and integrate this contextual information when interpreting nonverbal behaviors. MultiSense can also assimilate behaviors over a full interaction and summarize the observed affective states of the user. We demonstrate the capabilities of the new framework with a concrete use case from the mental health domain where MultiSense is used as part of a decision support tool to assess indicators of psychological distress such as depression and post-traumatic stress disorder (PTSD). In this scenario, MultiSense not only infers psychological distress indicators from nonverbal behaviors but also broadcasts the user state in real-time to a virtual agent (i.e., a digital interviewer) designed to conduct semi-structured interviews with human participants. Our experiments show the added value of our multimodal synchronization approaches and also demonstrate the importance of MultiSense contextual interpretation when inferring distress indicators.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Manini, Barbara; Tsui, Katherine; Stone, Adam; Scassellati, Brian; Traum, David; Merla, Arcangelo; Petitto, Laura Ann
Physiological and behavioral correlates of babies’ social engagement with robot and virtual human artificial intelligence agents Proceedings Article
In: Proceedings of SRCD, Austin, TX, 2017.
@inproceedings{manini_physiological_2017,
title = {Physiological and behavioral correlates of babies’ social engagement with robot and virtual human artificial intelligence agents},
author = {Barbara Manini and Katherine Tsui and Adam Stone and Brian Scassellati and David Traum and Arcangelo Merla and Laura Ann Petitto},
url = {https://www.researchgate.net/publication/316167858_Physiological_and_behavioral_correlates_of_babies'_social_engagement_with_robot_and_virtual_human_artificial_intelligence_agents},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of SRCD},
address = {Austin, TX},
abstract = {Exposure to the patterns of natural language in early life—especially in ways that are rich in socially contingent interaction and conversation—is among the most powerful facilitators of the human language acquisition process (Petitto et al., 2016). Adults’ infant-directed language (e.g., simple rhythmic nursery rhymes), communicated in social interactions with joint attention, supports babies’ biological predisposition to language development in the first year of life (Brook & Meltzoff, 2015). Yet many babies have minimal language exposure in early life that can have devastating consequences for their language learning and reading success—such as the deaf baby. With the aim to develop a learning tool for babies deprived of natural language input during sensitive periods in human development, we studied whether artificial intelligent agents (social robots and virtual humans) can serve as an augmentative communicative partner in early infancy. Using innovative thermal IR imaging technology, we recorded, imaged, and analyzed infants’ emotional arousal and behavioral responses during social interactions with a robot and virtual human, as compared with a real human. We asked whether babies’ physiological and behavioral responses of joint attention during these robot and virtual human interactions were similar to or different from interactions with a real human. We hypothesized that if baby–artificial agent emotional arousal measures were observed to be similar to humans, then artificial agents may potentially serve as a promising tool in facilitating language learning in infants with early-life minimal language exposure. Methods: 10 hearing (nonsigning) infants (five 6-9mths; five 9-12mths). Following Meltzoff et al. (2010), after a brief familiarization period with the robot, infants participated in 6–10 episodes of robot head and eye gaze turning (left or right). Two screens were placed on each side of the robot, rendering it “looking at the screen” when it turned its head. Contiguous with the robot’s gaze/head, both screens showed a nursery rhyme in ASL, performed alternatively by a virtual human or a real human (held constant: physical features and linguistic content). Results: Time-locked/integrated infant behavior and thermal responses were analyzed (c.f., Merla, 2004; Manini et al., 2013). (1) Behavioral data showed babies followed robot gaze, yet the Thermal IR data added new insights: Significant increase in nasal-tip temperature was observed, indicative of suppression of the sympathetic activity and increase of parasympathetic/pro-social attentiveness. (2) Thermal responses with virtual human vs real human revealed a phasic decrease of temperature likely associated with increased vigilance and higher cognitive attention processes (e.g., match-mismatch analysis). Discussion: Robots and virtual humans may be effective as augmentative communicative partners for young babies. Novel here, we observed an integrated physiological and behavioral response of joint attention and social engagement during babies’ interaction with the robot. Moreover, the virtual human elicited a peaked attentional arousal reaction, which may be indicative of linguistic stimuli detection and/or a “readiness to learn.” The integration of physiological and behavioral responses provides insights that pave the way for groundbreaking applications in the field of artificial intelligence (Merla, 2014) and augmentative learning tools that promote language acquisition in young children.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen; Amir, Ori; Lin, Rebecca
Social influence of humor in virtual human counselor's self-disclosure Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 1546-4261.
@article{kang_social_2017,
title = {Social influence of humor in virtual human counselor's self-disclosure},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang and Ori Amir and Rebecca Lin},
url = {http://doi.wiley.com/10.1002/cav.1763},
doi = {10.1002/cav.1763},
issn = {1546-4261},
year = {2017},
date = {2017-04-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {We explored the social influence of humor in a virtual human counselor's self-disclosure while also varying the ethnicity of the virtual counselor. In a 2 × 3 experiment (humor and ethnicity of the virtual human counselor), participants experienced counseling interview interactions via Skype on a smartphone. We measured user responses to and perceptions of the virtual human counselor. The results demonstrate that humor positively affects user responses to and perceptions of a virtual counselor. The results further suggest that matching styles of humor with a virtual counselor's ethnicity influences user responses and perceptions. The results offer insight into the effective design and development of realistic and believable virtual human counselors. Furthermore, they illuminate the potential use of humor to enhance self‐disclosure in human–agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Scherer, Stefan
The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment Proceedings Article
In: Proceedings of the Preconference on Affective Computing at the Society for Affective Science, Boston, MA, 2017.
@inproceedings{neubauer_effects_2017,
title = {The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment},
author = {Catherine Neubauer and Stefan Scherer},
url = {http://ict.usc.edu/pubs/The%20Effects%20of%20Pre-task%20Team%20Collaboration%20on%20Facial%20Expression%20and%20Speech%20Entrainment.pdf},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the Preconference on Affective Computing at the Society for Affective Science},
address = {Boston, MA},
abstract = {Many everyday tasks are complex and require the coordination of one or more individuals. Such tasks can be relatively simple like passing a ball to a friend during a game of catch, while others are more complex such as performing a life-saving surgery where surgeons, anesthesiologists and nurses all work together in a multi-person team [1]. Such coordination requires the appropriate allocation of cognitive and behavioral effort to meet the changing demands of their environment and cannot be completed alone [1]. These mutually cooperative behaviors can include team communication, body position and even affective cues [2]. Some behaviors are explicitly controlled to be coordinated [3] (e.g., when an individual purposely attempts to follow the behaviors of their teammate or team leader), while others are implicit or unconscious. Presently, these shared behaviors have been referred to as entrainment [4] [5], mimicry [6] [7] and even action matching [8] [9]; however, the specific term used typically refers to the underlying theoretical cause for the phenomenon. Theoretically, entrainment can be explained as the spontaneous interpersonal coupling that occurs because the behavior of one or more individuals is affected by another’s behavior in a closed loop system. Additionally, such behavior is typically evident when working on a mutual, goal-directed task [10]. Therefore, for the purposes of this paper we will refer to the cooperative behaviors between teammates that support problem solving as entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Chao; Lu, Xin; Lin, Zhe; Shechtman, Eli; Wang, Oliver; Li, Hao
High-Resolution Image Inpainting using Multi-Scale Neural Patch Synthesis Journal Article
In: arXiv preprint arXiv:1611.09969v2, 2017.
@article{yang_high-resolution_2017,
title = {High-Resolution Image Inpainting using Multi-Scale Neural Patch Synthesis},
author = {Chao Yang and Xin Lu and Zhe Lin and Eli Shechtman and Oliver Wang and Hao Li},
url = {https://arxiv.org/pdf/1611.09969},
year = {2017},
date = {2017-04-01},
journal = {arXiv preprint arXiv:1611.09969v2},
abstract = {Recent advances in deep learning have shown exciting promise in filling large holes in natural images with semantically plausible and context aware details, impacting fundamental image manipulation tasks such as object removal. While these learning-based methods are significantly more effective in capturing high-level features than prior techniques, they can only handle very low-resolution inputs due to memory limitations and difficulty in training. Even for slightly larger images, the inpainted regions would appear blurry and unpleasant boundaries become visible. We propose a multi-scale neural patch synthesis approach based on joint optimization of image content and texture constraints, which not only preserves contextual structures but also produces high-frequency details by matching and adapting patches with the most similar mid-layer feature correlations of a deep classification network. We evaluate our method on the ImageNet and Paris Streetview datasets and achieved state-of-the-art inpainting accuracy. We show our approach produces sharper and more coherent results than prior methods, especially for high-resolution images.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
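One building block implied by the abstract is matching each feature patch inside the hole to its most similar patch outside it in a mid-layer feature map. A toy numpy version of that nearest-patch search follows; a real system would take the feature map from a classification network such as VGG, which this sketch fakes with random data:

import numpy as np

def nearest_patches(feat, hole_mask, size=3):
    """For each feature patch centered inside the hole, return the center of
    the most similar (L2) patch centered outside it. feat: (H, W, C)."""
    H, W, _ = feat.shape
    r = size // 2
    centers = [(i, j) for i in range(r, H - r) for j in range(r, W - r)]
    def patch(i, j):
        return feat[i - r:i + r + 1, j - r:j + r + 1].ravel()
    outside = [(i, j) for i, j in centers if not hole_mask[i, j]]
    matches = {}
    for i, j in centers:
        if hole_mask[i, j]:
            dists = [np.sum((patch(i, j) - patch(a, b)) ** 2) for a, b in outside]
            matches[(i, j)] = outside[int(np.argmin(dists))]
    return matches

feat = np.random.rand(16, 16, 8)          # stand-in for a mid-layer feature map
mask = np.zeros((16, 16), dtype=bool)
mask[6:10, 6:10] = True                   # the hole to be synthesized
print(len(nearest_patches(feat, mask)))   # one match per in-hole patch center

In the paper this matching feeds a texture term that is jointly optimized with a content term, coarse to fine across scales; the sketch covers only the matching step.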
Roemmele, Melissa; Kobayashi, Sosuke; Inoue, Naoya; Gordon, Andrew S.
An RNN-based Binary Classifier for the Story Cloze Test Proceedings Article
In: Proceedings of the 2nd Workshop on Linking Models of Lexical, Sentential and Discourse-level Semantics, pp. 74–80, Association for Computational Linguistics, Valencia, Spain, 2017.
@inproceedings{roemmele_rnn-based_2017,
title = {An RNN-based Binary Classifier for the Story Cloze Test},
author = {Melissa Roemmele and Sosuke Kobayashi and Naoya Inoue and Andrew S. Gordon},
url = {http://www.aclweb.org/anthology/W/W17/W17-09.pdf#page=86},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the 2nd Workshop on Linking Models of Lexical, Sentential and Discourse-level Semantics},
pages = {74–80},
publisher = {Association for Computational Linguistics},
address = {Valencia, Spain},
abstract = {The Story Cloze Test consists of choosing a sentence that best completes a story given two choices. In this paper we present a system that performs this task using a supervised binary classifier on top of a recurrent neural network to predict the probability that a given story ending is correct. The classifier is trained to distinguish correct story endings given in the training data from incorrect ones that we artificially generate. Our experiments evaluate different methods for generating these negative examples, as well as different embedding-based representations of the stories. Our best result obtains 67.2% accuracy on the test set, outperforming the existing top baseline of 58.5%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
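A compressed sketch of the described architecture: an RNN encoder whose final state feeds a binary classifier trained against artificially generated negative endings. The framework choice (PyTorch), layer sizes, and random batch are placeholders rather than the authors' setup:

import torch
import torch.nn as nn

class EndingScorer(nn.Module):
    """GRU over story+ending tokens; sigmoid output = P(ending is correct)."""
    def __init__(self, vocab=5000, emb=64, hid=128):
        super().__init__()
        self.embed = nn.Embedding(vocab, emb)
        self.gru = nn.GRU(emb, hid, batch_first=True)
        self.out = nn.Linear(hid, 1)

    def forward(self, tokens):                 # tokens: (batch, seq_len)
        _, h = self.gru(self.embed(tokens))    # h: (1, batch, hid)
        return torch.sigmoid(self.out(h[-1]))  # (batch, 1)

model = EndingScorer()
opt = torch.optim.Adam(model.parameters())
loss_fn = nn.BCELoss()

# Placeholder batch: stories paired with correct (1) or sampled-negative (0) endings.
tokens = torch.randint(0, 5000, (8, 40))
labels = torch.randint(0, 2, (8, 1)).float()
loss = loss_fn(model(tokens), labels)
loss.backward()
opt.step()
# At test time: score both candidate endings and pick the higher probability.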
Chang, Jonathan; Scherer, Stefan
Learning Representations of Emotional Speech with Deep Convolutional Generative Adversarial Networks Proceedings Article
In: Proceedings of the 42nd IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2017), IEEE, New Orleans, LA, 2017.
@inproceedings{chang_learning_2017,
title = {Learning Representations of Emotional Speech with Deep Convolutional Generative Adversarial Networks},
author = {Jonathan Chang and Stefan Scherer},
url = {https://arxiv.org/pdf/1705.02394.pdf},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of the 42nd IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2017)},
publisher = {IEEE},
address = {New Orleans, LA},
abstract = {Automatically assessing emotional valence in human speech has historically been a difficult task for machine learning algorithms. The subtle changes in the voice of the speaker that are indicative of positive or negative emotional states are often “overshadowed” by voice characteristics relating to emotional intensity or emotional activation. In this work we explore a representation learning approach that automatically derives discriminative representations of emotional speech. In particular, we investigate two machine learning strategies to improve classifier performance: (1) utilization of unlabeled data using a deep convolutional generative adversarial network (DCGAN), and (2) multitask learning. Within our extensive experiments we leverage a multitask annotated emotional corpus as well as a large unlabeled meeting corpus (around 100 hours). Our speaker-independent classification experiments show that in particular the use of unlabeled data in our investigations improves performance of the classifiers and both fully supervised baseline approaches are outperformed considerably. We improve the classification of emotional valence on a discrete 5-point scale to 43.88% and on a 3-point scale to 49.80%, which is competitive to state-of-the-art performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
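As a hedged illustration of reusing an adversarially trained network for supervised valence classification (the layer sizes and spectrogram shape are invented, and this is not the paper's model), one common pattern is to feed a DCGAN-style discriminator trunk, pretrained on unlabeled speech, into a small supervised head:

import torch
import torch.nn as nn

# Discriminator trunk from a (hypothetical) DCGAN trained on unlabeled
# speech spectrograms; its features are reused for supervised valence.
trunk = nn.Sequential(
    nn.Conv2d(1, 16, 4, stride=2, padding=1), nn.LeakyReLU(0.2),
    nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.LeakyReLU(0.2),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(),
)
valence_head = nn.Linear(32, 5)   # 5-point valence scale, as in the paper

spec = torch.randn(8, 1, 64, 64)  # batch of placeholder spectrogram crops
logits = valence_head(trunk(spec))
print(logits.shape)               # torch.Size([8, 5])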
Krum, David M.; Phan, Thai; Kang, Sin-Hwa
Motor Adaptation in Response to Scaling and Diminished Feedback in Virtual Reality Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 233–234, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{krum_motor_2017,
title = {Motor Adaptation in Response to Scaling and Diminished Feedback in Virtual Reality},
author = {David M. Krum and Thai Phan and Sin-Hwa Kang},
url = {http://ieeexplore.ieee.org/document/7892262/#full-text-section},
doi = {10.1109/VR.2017.7892262},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {233–234},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {As interaction techniques involving scaling of motor space in virtual reality are becoming more prevalent, it is important to understand how individuals adapt to such scalings and how they re-adapt back to non-scaled norms. This preliminary work examines how individuals, performing a targeted ball throwing task, adapted to addition and removal of a translational scaling of the ball’s forward flight. This was examined under various conditions: flight of the ball shown with no delay, hidden flight of the ball with no delay, and hidden flight with a 2 second delay. Hiding the ball’s flight, as well as the delay, created disruptions in the ability of the participants to perform the task and adapt to new scaling conditions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; Anglin, Julia; Krum, David M.; Liew, Sook-Lei
REINVENT: A Low-Cost, Virtual Reality Brain-Computer Interface for Severe Stroke Upper Limb Motor Recovery Proceedings Article
In: Proceedings of the IEEE Virtual Reality Conference, pp. 385–386, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{spicer_reinvent_2017,
title = {REINVENT: A Low-Cost, Virtual Reality Brain-Computer Interface for Severe Stroke Upper Limb Motor Recovery},
author = {Ryan Spicer and Julia Anglin and David M. Krum and Sook-Lei Liew},
url = {http://ieeexplore.ieee.org/abstract/document/7892338/},
doi = {10.1109/VR.2017.7892338},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of the IEEE Virtual Reality Conference},
pages = {385–386},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {There are few effective treatments for rehabilitation of severe motor impairment after stroke. We developed a novel closed-loop neurofeedback system called REINVENT to promote motor recovery in this population. REINVENT (Rehabilitation Environment using the Integration of Neuromuscular-based Virtual Enhancements for Neural Training) harnesses recent advances in neuroscience, wearable sensors, and virtual technology and integrates low-cost electroencephalography (EEG) and electromyography (EMG) sensors with feedback in a head-mounted virtual reality display (VR) to provide neurofeedback when an individual's neuromuscular signals indicate movement attempt, even in the absence of actual movement. Here we describe the REINVENT prototype and provide evidence of the feasibility and safety of using REINVENT with older adults.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
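The closed loop the abstract describes amounts to: read synchronized EEG/EMG, decide whether a movement attempt is present, and reward it with feedback in the headset. A minimal stand-in for that loop, with fake signal acquisition and made-up thresholds (not the actual REINVENT pipeline):

import random

EEG_THRESHOLD = 0.6   # stand-in for a motor-attempt signature strength
EMG_THRESHOLD = 0.4   # stand-in for normalized muscle activation

def read_sensors():
    """Placeholder for synchronized EEG/EMG acquisition."""
    return random.random(), random.random()

def step():
    eeg, emg = read_sensors()
    # Reward an attempt signaled by either modality, even without movement.
    if eeg > EEG_THRESHOLD or emg > EMG_THRESHOLD:
        return "animate virtual arm"   # positive feedback in the headset
    return "hold still"

print([step() for _ in range(5)])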
Khooshabeh, Peter; Choromanski, Igor; Neubauer, Catherine; Krum, David M.; Spicer, Ryan; Campbell, Julia
Mixed Reality Training for Tank Platoon Leader Communication Skills Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 333–334, IEEE, Los Angeles, CA, 2017.
@inproceedings{khooshabeh_mixed_2017,
title = {Mixed Reality Training for Tank Platoon Leader Communication Skills},
author = {Peter Khooshabeh and Igor Choromanski and Catherine Neubauer and David M. Krum and Ryan Spicer and Julia Campbell},
url = {http://ieeexplore.ieee.org/document/7892312/#full-text-section},
doi = {10.1109/VR.2017.7892312},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {333–334},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Here we describe the design and usability evaluation of a mixed reality prototype to simulate the role of a tank platoon leader: an individual who not only commands a tank, but also directs a platoon of three other tanks with their own respective tank commanders. The domain of tank commander training has relied on physical simulators of the actual Abrams tank and encapsulates the whole crew. The TALK-ON system we describe here focuses on training communication skills of the leader in a simulated tank crew. We report results from a usability evaluation and discuss how they will inform our future work for collective tank training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dehghani, Morteza; Boghrati, Reihane; Man, Kingson; Hoover, Joseph; Gimbel, Sarah; Vaswani, Ashish; Zevin, Jason; Immordino-Yang, Mary Helen; Gordon, Andrew; Damasio, Antonio; Kaplan, Jonas T.
Decoding the Neural Representation of Story Meanings across Languages Journal Article
In: Human Brain Mapping, vol. 38, no. 12, 2017.
@article{dehghani_decoding_2017,
title = {Decoding the Neural Representation of Story Meanings across Languages},
author = {Morteza Dehghani and Reihane Boghrati and Kingson Man and Joseph Hoover and Sarah Gimbel and Ashish Vaswani and Jason Zevin and Mary Helen Immordino-Yang and Andrew Gordon and Antonio Damasio and Jonas T. Kaplan},
url = {https://psyarxiv.com/qrpp3/},
doi = {10.1002/hbm.23814},
year = {2017},
date = {2017-03-01},
journal = {Human Brain Mapping},
volume = {38},
number = {12},
abstract = {Drawing from a common lexicon of semantic units, humans fashion narratives whose meaning transcends that of their individual utterances. However, while brain regions that represent lower-level semantic units, such as words and sentences, have been identified, questions remain about the neural representation of narrative comprehension, which involves inferring cumulative meaning. To address these questions, we exposed English, Mandarin and Farsi native speakers to native language translations of the same stories during fMRI scanning. Using a new technique in natural language processing, we calculated the distributed representations of these stories (capturing the meaning of the stories in high-dimensional semantic space), and demonstrate that using these representations we can identify the specific story a participant was reading from the neural data. Notably, this was possible even when the distributed representations were calculated using stories in a different language than the participant was reading. Relying on over 44 billion classifications, our results reveal that identification relied on a collection of brain regions most prominently located in the default mode network. These results demonstrate that neuro-semantic encoding of narratives happens at levels higher than individual semantic units and that this encoding is systematic across both individuals and languages.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
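The identification analysis pairs each neural response with the story whose distributed semantic representation it best matches. A toy version of the matching step, with random vectors standing in for both the fMRI patterns and the story embeddings (the real work maps between the two spaces with a learned model):

import numpy as np

rng = np.random.default_rng(1)
story_vecs = rng.normal(size=(40, 100))   # 40 stories x 100-d semantic space
neural = story_vecs + rng.normal(scale=2.0, size=(40, 100))  # noisy "brain" view

def identify(neural_pattern):
    """Pick the story whose representation is most similar (cosine)."""
    sims = story_vecs @ neural_pattern
    sims /= np.linalg.norm(story_vecs, axis=1) * np.linalg.norm(neural_pattern)
    return int(np.argmax(sims))

hits = sum(identify(neural[i]) == i for i in range(40))
print(f"identification accuracy: {hits}/40")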
Azmandian, Mahdi; Grechkin, Timofey; Rosenberg, Evan Suma
An Evaluation of Strategies for Two-User Redirected Walking in Shared Physical Spaces Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 91–98, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{azmandian_evaluation_2017,
title = {An Evaluation of Strategies for Two-User Redirected Walking in Shared Physical Spaces},
author = {Mahdi Azmandian and Timofey Grechkin and Evan Suma Rosenberg},
url = {http://ieeexplore.ieee.org/abstract/document/7892235/},
doi = {10.1109/VR.2017.7892235},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {91–98},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {As the focus of virtual reality technology is shifting from single-person experiences to multi-user interactions, it becomes increasingly important to accommodate multiple co-located users within a shared real-world space. For locomotion and navigation, the introduction of multiple users moving both virtually and physically creates additional challenges related to potential user-on-user collisions. In this work, we focus on defining the extent of these challenges, in order to apply redirected walking to two users immersed in virtual reality experiences within a shared physical tracked space. Using a computer simulation framework, we explore the costs and benefits of splitting available physical space between users versus attempting to algorithmically prevent user-to-user collisions. We also explore fundamental components of collision prevention such as steering the users away from each other, forced stopping, and user re-orientation. Each component was analyzed for the number of potential disruptions to the flow of the virtual experience. We also develop a novel collision prevention algorithm that reduces overall interruptions by 17.6% and collision prevention events by 58.3%. Our results show that sharing space using our collision prevention method is superior to subdividing the tracked space.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
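A minimal sketch of two of the collision-prevention components analyzed in the paper, steering users apart and forced stopping; the safety radius and action strings are illustrative, not the published algorithm:

import math

SAFETY_RADIUS = 1.5   # meters; trigger avoidance below this separation

def prevention_action(p1, p2):
    """Pick a collision-prevention action for two tracked users."""
    dist = math.hypot(p2[0] - p1[0], p2[1] - p1[1])
    if dist < SAFETY_RADIUS:
        return "forced stop, then reorient users away from each other"
    # Otherwise bias redirection gains so headings diverge pre-emptively.
    away = math.atan2(p1[1] - p2[1], p1[0] - p2[0])
    return f"steer user 1 toward heading {away:.2f} rad"

print(prevention_action((0.0, 0.0), (1.0, 0.5)))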
2008
Parsons, Thomas D.; Rizzo, Albert
Initial Validation of a Virtual Environment for Assessment of Memory Functioning: Virtual Reality Cognitive Performance Assessment Test Journal Article
In: CyberPsychology and Behavior, vol. 11, no. 1, pp. 16–24, 2008.
@article{parsons_initial_2008,
title = {Initial Validation of a Virtual Environment for Assessment of Memory Functioning: Virtual Reality Cognitive Performance Assessment Test},
author = {Thomas D. Parsons and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Initial%20Validation%20of%20a%20Virtual%20Environment%20for%20Assessment%20of%20Memory%20Functioning-%20Virtual%20Reality%20Cognitive%20Performance%20Assessment%20Test.pdf},
year = {2008},
date = {2008-01-01},
journal = {CyberPsychology and Behavior},
volume = {11},
number = {1},
pages = {16–24},
abstract = {The current project is an initial attempt at validating the Virtual Reality Cognitive Performance Assessment Test (VRCPAT), a virtual environment–based measure of learning and memory. To examine convergent and discriminant validity, a multitrait–multimethod matrix was used in which we hypothesized that the VRCPAT’s total learning and memory scores would correlate with other neuropsychological measures involving learning and memory but not with measures involving potential confounds (i.e., executive functions; attention; processing speed; and verbal fluency). Using a sequential hierarchical strategy, each stage of test development did not proceed until specified criteria were met. The 15-minute VRCPAT battery and a 1.5-hour in-person neuropsychological assessment were conducted with a sample of 30 healthy adults, between the ages of 21 and 36, that included equivalent distributions of men and women from ethnically diverse populations. Results supported both convergent and discriminant validity. That is, findings suggest that the VRCPAT measures a capacity that is (a) consistent with that assessed by traditional paper-and-pencil measures involving learning and memory and (b) inconsistent with that assessed by traditional paper-and-pencil measures assessing neurocognitive domains traditionally assumed to be other than learning and memory. We conclude that the VRCPAT is a valid test that provides a unique opportunity to reliably and efficiently study memory function within an ecologically valid environment.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Morie, Jacquelyn; Verhulsdonck, Gustav
Body/Persona/Action! Emerging Non-anthropomorphic Communication and Interaction in Virtual Worlds Proceedings Article
In: Proceedings of the ACE 2008 ACM International Conference on Advances in Computer Entertainment Technology, Yokohama, Japan, 2008.
@inproceedings{morie_bodypersonaaction_2008,
title = {Body/Persona/Action! Emerging Non-anthropomorphic Communication and Interaction in Virtual Worlds},
author = {Jacquelyn Morie and Gustav Verhulsdonck},
url = {http://ict.usc.edu/pubs/Body%20Persona%20Action%20Emerging%20Non-anthropomorphic%20Communication%20and%20Interaction%20in%20Virtual%20Worlds.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Proceedings of the ACE 2008 ACM International Conference on Advances in Computer Entertainment Technology},
address = {Yokohama, Japan},
abstract = {Avatars are traditionally understood as representing their human counterpart in virtual contexts by closely mimicking their real world physical characteristics. A new approach to designing avatars around non-anthropomorphic (non-human) characteristics currently questions the use of anthropomorphic principles and expands the use of avatars for virtual world interaction and communication. This paper provides a brief history of non-anthropomorphic avatars, with a focus on exploring the current use of such avatars in virtual worlds. In order to explain the shift in degree of anthropomorphism, we discuss Goffman's theory of symbolic interactionism, which holds that the self is constructed as a persona through social performance and relates identity to social behavior rather than appearance. Since non-anthropomorphic avatars are persistent characters engaged in a prolonged performance in virtual worlds, their use also may motivate emerging social mores, politics and ideologies. This paper argues that such avatar species create new social interactions and modes of communication that may signal interesting directions for future research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2007
Morie, Jacquelyn Ford
Performing in (virtual) spaces: embodiment and being in virtual environments Journal Article
In: International Journal of Performance Arts and Digital Media, vol. 3, no. 2-3, pp. 123–138, 2007.
@article{morie_performing_2007,
title = {Performing in (virtual) spaces: embodiment and being in virtual environments},
author = {Jacquelyn Ford Morie},
url = {http://ict.usc.edu/pubs/Performing%20in%20(virtual)%20spaces%20-%20embodiment%20and%20being%20in%20virtual%20environments.pdf},
year = {2007},
date = {2007-12-01},
journal = {International Journal of Performance Arts and Digital Media},
volume = {3},
number = {2-3},
pages = {123–138},
abstract = {This paper focuses on how the body has been recontextualised in the age of digital technology, especially through the phenomenon of Virtual Reality, and specifically on fully immersive VR environments made as art or performative installations. It discusses the progression in form and function from other digital media or 'cybermedia' to fully immersive virtual environments (VEs). This paper attempts to explicate the specialised and intrinsic qualities of 'Being' in immersive VEs, and how it impacts both the experience of the embodied person in the virtual environment, and our thinking about everyday reality. The unique state of Being in immersive VEs has created a paradigm shift in what humans are now able to experience, and affects how we understand our embodied selves in an increasingly digital world. Because of this, the contributions of visual and performance artists to VE’s continued development is key to how we will know and comprehend ourselves in the near and far future as creatures existing in both the physical and the digital domains. The paper draws upon twenty years as a professional Virtual Reality 'maker' who has trained in both Computer Science and in Art, and finds fascinating affinities between these disciplines in the space of the VE where people and performers interact in new embodied modalities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Swartout, William; Traum, David; Marsella, Stacy C.; Piepol, Diane
Building Interactive Virtual Humans for Training Environments Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2007.
@inproceedings{kenny_building_2007,
title = {Building Interactive Virtual Humans for Training Environments},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and William Swartout and David Traum and Stacy C. Marsella and Diane Piepol},
url = {http://ict.usc.edu/pubs/Building%20Interactive%20Virtual%20Humans%20for%20Training%20Environments.pdf},
year = {2007},
date = {2007-11-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {There is a great need in the Joint Forces to have human-to-human interpersonal training for skills such as negotiation, leadership, interviewing and cultural training. Virtual environments can be incredible training tools if used properly and used for the correct training application. Virtual environments have already been very successful in training Warfighters how to operate vehicles and weapons systems. At the Institute for Creative Technologies (ICT) we have been exploring a new question: can virtual environments be used to train Warfighters in interpersonal skills such as negotiation, tactical questioning and leadership that are so critical for success in the contemporary operating environment? Using embodied conversational agents to create this type of training system has been one of the goals of the Virtual Humans project at the institute. ICT has a great deal of experience building complex, integrated and immersive training systems that address the human factor needs for training experiences. This paper will address the research, technology and value of developing virtual humans for training environments. This research includes speech recognition, natural language understanding & generation, dialogue management, cognitive agents, emotion modeling, question response managers, speech generation and non-verbal behavior. Also addressed will be the diverse set of training environments we have developed for the system, from single computer laptops to multi-computer immersive displays to real and virtual integrated environments. This paper will also discuss the problems, issues and solutions we encountered while building these systems. The paper will recount subject testing we have performed in these environments and results we have obtained from users. Finally the future of this type of Virtual Humans technology and training applications will be discussed.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Tortell, Rebecca; Williams, Josh
Would You Like to Play a Game? Experience and Expectation in Game-Based Learning Environments Book Section
In: Computer Games and Team and Individual Learning, Amsterdam, The Netherlands, 2007.
@incollection{morie_would_2007,
title = {Would You Like to Play a Game? Experience and Expectation in Game-Based Learning Environments},
author = {Jacquelyn Morie and Rebecca Tortell and Josh Williams},
year = {2007},
date = {2007-11-01},
booktitle = {Computer Games and Team and Individual Learning},
address = {Amsterdam, The Netherlands},
abstract = {We present results from a series of experiments that looked at how previous experience and immediate priming affect a user's arousal state, performance and memory in a virtual environment used for training. We found that people's game play experience had effects on these measures, and that if participants expected the environment to be a game, they approached it with expectations that were not always conducive to optimal training. We suggest that the type of game being used for training will have the best outcome if users are familiar with that mode and have the appropriate schema to approach the training.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Andrew S.; Cao, Yong; Swanson, Reid
Automated Story Capture From Internet Weblogs Proceedings Article
In: Proceedings of the 4th International Conference on Knowledge Capture, Whistler, BC, 2007.
@inproceedings{gordon_automated_2007,
title = {Automated Story Capture From Internet Weblogs},
author = {Andrew S. Gordon and Yong Cao and Reid Swanson},
url = {http://ict.usc.edu/pubs/Automated%20Story%20Capture%20From%20Internet%20Weblogs.pdf},
year = {2007},
date = {2007-10-01},
booktitle = {Proceedings of the 4th International Conference on Knowledge Capture},
address = {Whistler, BC},
abstract = {Among the most interesting ways that people share knowledge is through the telling of stories, i.e. first-person narratives about real life experiences. Millions of these stories appear in Internet weblogs, offering a potentially valuable resource for future knowledge management and training applications. In this paper we describe efforts to automatically capture stories from Internet weblogs by extracting them using statistical text classification techniques. We evaluate the precision and recall performance of competing approaches. We describe the large-scale application of story extraction technology to Internet weblogs, producing a corpus of stories with over a billion words.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
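The extraction method is statistical text classification over weblog passages. A compact modern equivalent of that pipeline using scikit-learn (the training snippets are invented, and the library postdates the paper, so this is an analogy rather than the original system):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

train_texts = [
    "Yesterday I missed my bus and ended up walking home in the rain.",
    "Last summer we drove to the coast and our car broke down twice.",
    "Top ten tips for configuring your home router securely.",
    "A review of the new laptop: specs, battery life, and pricing.",
]
labels = [1, 1, 0, 0]   # 1 = personal story, 0 = not a story

clf = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), LogisticRegression())
clf.fit(train_texts, labels)
print(clf.predict(["This morning my dog chased the mail carrier down the street."]))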
Oh, Sejin; Gratch, Jonathan; Woo, Woontack
Explanatory Style for Socially Interactive Agents Proceedings Article
In: Lecture Notes in Computer Science, Lisbon, Portugal, 2007.
@inproceedings{oh_explanatory_2007,
title = {Explanatory Style for Socially Interactive Agents},
author = {Sejin Oh and Jonathan Gratch and Woontack Woo},
url = {http://ict.usc.edu/pubs/Explanatory%20Style%20for%20Socially%20Interactive%20Agents.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Computer Science},
address = {Lisbon, Portugal},
abstract = {Recent years have seen an explosion of interest in computational models of socio-emotional processes, both as a means to deepen understanding of human behavior and as a mechanism to drive a variety of training and entertainment applications. In contrast with work on emotion, where research groups have developed detailed models of emotional processes, models of personality have emphasized shallow surface behavior. Here, we build on computational appraisal models of emotion to better characterize dispositional differences in how people come to understand social situations. Known as explanatory style, this dispositional factor plays a key role in social interactions and certain socio-emotional disorders, such as depression. Building on appraisal and attribution theories, we model key conceptual variables underlying the explanatory style, and enable agents to exhibit different explanatory tendencies according to their personalities. We describe an interactive virtual environment that uses the model to allow participants to explore individual differences in the explanation of social events, with the goal of encouraging the development of perspective taking and emotion-regulatory skills.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Roque, Antonio; Leuski, Anton; Georgiou, Panayiotis G.; Gerten, Jillian; Martinovski, Bilyana; Narayanan, Shrikanth; Robinson, Susan; Vaswani, Ashish
Hassan: A Virtual Human for Tactical Questioning Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_hassan_2007,
title = {Hassan: A Virtual Human for Tactical Questioning},
author = {David Traum and Antonio Roque and Anton Leuski and Panayiotis G. Georgiou and Jillian Gerten and Bilyana Martinovski and Shrikanth Narayanan and Susan Robinson and Ashish Vaswani},
url = {http://ict.usc.edu/pubs/Hassan-%20A%20Virtual%20Human%20for%20Tactical%20Questioning%20.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
address = {Antwerp, Belgium},
abstract = {We present Hassan, a virtual human who engages in Tactical Questioning dialogues. We describe the tactical questioning domain, the motivation for this character, the specific architecture, and present brief examples and an evaluation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Fron, Janine; Fullerton, Tracy; Morie, Jacquelyn; Pearce, Celia
The Hegemony of Play Proceedings Article
In: Proceedings of DiGRA: Situated Play, Tokyo, Japan, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{fron_hegemony_2007,
title = {The Hegemony of Play},
author = {Janine Fron and Tracy Fullerton and Jacquelyn Morie and Celia Pearce},
url = {http://ict.usc.edu/pubs/The%20Hegemony%20of%20Play.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Proceedings of DiGRA: Situated Play},
address = {Tokyo, Japan},
abstract = {In this paper, we introduce the concept of a "Hegemony of Play" to critique the way in which a complex layering of technological, commercial and cultural power structures has dominated the development of the digital game industry over the past 35 years, creating an entrenched status quo that ignores the needs and desires of "minority" players such as women and "non-gamers," who in fact represent the majority of the population. Drawing from the history of pre-digital games, we demonstrate that these practices have "narrowed the playing field" and, contrary to conventional wisdom, have actually hindered, rather than boosted, the industry's commercial success. We reject the inevitability of these power structures, and urge those in game studies to "step up to the plate" and take a more proactive stance in questioning and critiquing the status of the Hegemony of Play.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lance, Brent; Marsella, Stacy C.
Emotionally Expressive Head and Body Movement During Gaze Shifts Proceedings Article
In: 7th International Conference on Intelligent Virtual Agents (IVA 2007), Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation
@inproceedings{lance_emotionally_2007,
title = {Emotionally Expressive Head and Body Movement During Gaze Shifts},
author = {Brent Lance and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Emotionally%20Expressive%20Head%20and%20Body%20Movement%20During%20Gaze%20Shifts.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {7th International Conference on Intelligent Virtual Agents (IVA 2007)},
address = {Paris, France},
abstract = {Current state-of-the-art virtual characters fall far short of characters produced by skilled animators. One reason for this is that the physical behaviors of virtual characters do not adequately express the characters' emotions and attitudes. A key deficiency is that their gaze behavior is not emotionally expressive. This paper describes work on expressing emotion through head movement and body posture during gaze shifts, with the intent to integrate a model of emotionally expressive eye movement into this work in the future. The paper further describes an evaluation showing that users can recognize the emotional states generated by the model.},
keywords = {Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_model_2007,
title = {A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents},
author = {Antonio Roque and David Traum},
url = {http://ict.usc.edu/pubs/A%20Model%20of%20Compliance%20and%20Emotion%20for%20Potentially%20Adversarial%20Dialogue%20%20Agents.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
address = {Antwerp, Belgium},
abstract = {We present a model of compliance, for domains in which a dialogue agent may become adversarial. This model includes a set of emotions and a set of levels of compliance, and strategies for changing these.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jonsdottir, Gudny Ragna; Gratch, Jonathan; Fast, Edward; Thórisson, Kristinn R.
Fluid Semantic Back-Channel Feedback in Dialogue: Challenges & Progress Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jonsdottir_fluid_2007,
title = {Fluid Semantic Back-Channel Feedback in Dialogue: Challenges & Progress},
author = {Gudny Ragna Jonsdottir and Jonathan Gratch and Edward Fast and Kristinn R. Thórisson},
url = {http://ict.usc.edu/pubs/Fluid%20Semantic%20Back-Channel%20Feedback%20in%20Dialogue-%20Challenges%20&%20Progress.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
address = {Paris, France},
abstract = {Participation in natural, real-time dialogue calls for behaviors supported by perception-action cycles from around 100 msec and up. Generating certain kinds of such behaviors, namely envelope feedback, has been possible since the early 90s. Real-time backchannel feedback related to the content of a dialogue has been more difficult to achieve. In this paper we describe our progress in allowing virtual humans to give rapid within-utterance content-specific feedback in real-time dialogue. We present results from human-subject studies of content feedback, where results show that content feedback to a particular phrase or word in human-human dialogue comes 560-2500 msec from the phrase's onset, 1 second on average. We also describe a system that produces such feedback with an autonomous agent in limited topic domains, present performance data of this agent in human-agent interactions experiments and discuss technical challenges in light of the observed human-subject data.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
An Interactive 360° Light Field Display Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_interactive_2007,
title = {An Interactive 360° Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
abstract = {While a great deal of computer generated imagery is modeled and rendered in 3D, the vast majority of this 3D imagery is shown on 2D displays. Various forms of 3D displays have been contemplated and constructed for at least one hundred years [Lippman 1908], but only recent evolutions in digital capture, computation, and display have made functional and practical 3D displays possible.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Chabert, Charles-Felix; Bolas, Mark; Peers, Pieter; Debevec, Paul
A system for high-resolution face scanning based on polarized spherical illumination Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
Links | BibTeX | Tags: Graphics, MxR
@inproceedings{ma_system_2007,
title = {A system for high-resolution face scanning based on polarized spherical illumination},
author = {Wan-Chun Ma and Tim Hawkins and Charles-Felix Chabert and Mark Bolas and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20system%20for%20high-resolution%20face%20scanning%20based%20on%20polarized%20spherical%20illumination.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Ai, Hua; Roque, Antonio; Leuski, Anton; Traum, David
Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System Proceedings Article
In: Proceedings of the 10th Interspeech Conference, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ai_using_2007,
title = {Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System},
author = {Hua Ai and Antonio Roque and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Using%20Information%20State%20to%20Improve%20Dialogue%20Move%20Identification%20in%20a%20Spoken%20Dialogue%20System.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Proceedings of the 10th Interspeech Conference},
address = {Antwerp, Belgium},
abstract = {In this paper we investigate how to improve the performance of a dialogue move and parameter tagger for a task-oriented dialogue system using the information-state approach. We use a corpus of utterances and information states from an implemented system to train and evaluate a tagger, and then evaluate the tagger in an on-line system. Use of information state context is shown to improve performance of the system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
Creating Spoken Dialogue Characters from Corpora without Annotations Proceedings Article
In: Interspeech 2007, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_creating_2007,
title = {Creating Spoken Dialogue Characters from Corpora without Annotations},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/Creating%20Spoken%20Dialogue%20Characters%20from%20Corpora%20without%20Annotations%20.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Interspeech 2007},
address = {Antwerp, Belgium},
abstract = {Virtual humans are being used in a number of applications, including simulation-based training, multi-player games, and museum kiosks. Natural language dialogue capabilities are an essential part of their human-like persona. These dialogue systems have a goal of being believable and generally have to operate within the bounds of their restricted domains. Most dialogue systems operate on a dialogue-act level and require extensive annotation efforts. Semantic annotation and rule authoring have long been known as bottlenecks for developing dialogue systems for new domains. In this paper, we investigate several dialogue models for virtual humans that are trained on an unannotated human-human corpus. These are inspired by information retrieval and work on the surface text level. We evaluate these in text-based and spoken interactions and also against the upper baseline of human-human dialogues.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
Rendering for an Interactive 360 Degree Light Field Display Proceedings Article
In: ACM SIGGRAPH conference proceedings, San Diego, CA, 2007.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_rendering_2007,
title = {Rendering for an Interactive 360 Degree Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Rendering%20for%20an%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {ACM SIGGRAPH conference proceedings},
address = {San Diego, CA},
abstract = {We describe a set of rendering techniques for an autostereoscopic light field display able to present interactive 3D graphics to multiple simultaneous viewers 360 degrees around the display. The display consists of a high-speed video projector, a spinning mirror covered by a holographic diffuser, and FPGA circuitry to decode specially rendered DVI video signals. The display uses a standard programmable graphics card to render over 5,000 images per second of interactive 3D graphics, projecting 360-degree views with 1.25 degree separation up to 20 updates per second. We describe the system's projection geometry and its calibration process, and we present a multiple-center-of-projection rendering technique for creating perspective-correct images from arbitrary viewpoints around the display. Our projection technique allows correct vertical perspective and parallax to be rendered for any height and distance when these parameters are known, and we demonstrate this effect with interactive raster graphics using a tracking system to measure the viewer's height and distance. We further apply our projection technique to the display of photographed light fields with accurate horizontal and vertical parallax. We conclude with a discussion of the display's visual accommodation performance and discuss techniques for displaying color imagery.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Sagae, Kenji; Tsujii, Jun
Dependency parsing and domain adaptation with data-driven LR models and parser ensembles Proceedings Article
In: Proceedings of the CoNLL 2007 Shared Task. Joint Conferences on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{sagae_dependency_2007,
title = {Dependency parsing and domain adaptation with data-driven LR models and parser ensembles},
author = {Kenji Sagae and Jun Tsujii},
url = {http://ict.usc.edu/pubs/Dependency%20Parsing%20and%20Domain%20Adaptation%20with%20LR%20Models%20and%20Parser%20Ensembles.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {Proceedings of the CoNLL 2007 Shared Task. Joint Conferences on Empirical Methods in Natural Language Processing and Computational Natural Language Learning},
address = {Prague, Czech Republic},
abstract = {We present a data-driven variant of the LR algorithm for dependency parsing, and extend it with a best-first search for probabilistic generalized data-driven LR dependency parsing. Parser actions are determined by a machine learning component, based on features that represent the current state of the parser. We apply this parsing framework to both tracks of the CoNLL 2007 shared task on dependency parsing, in each case taking advantage of multiple models trained with different learners. In the multilingual track, we train three data-driven LR models for each of the ten languages, and combine the analyses obtained with each individual model using a maximum spanning tree voting scheme. In the domain adaptation track, we use two models to parse unlabeled data in the target domain to supplement the labeled training set in the source domain, in a scheme similar to one iteration of co-training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Traum, David; Marsella, Stacy C.; Swartout, William
The More the Merrier: Multi-Party Negotiation with Virtual Humans Proceedings Article
In: AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence, pp. 1970–1971, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_more_2007,
title = {The More the Merrier: Multi-Party Negotiation with Virtual Humans},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and David Traum and Stacy C. Marsella and William Swartout},
url = {http://ict.usc.edu/pubs/The%20More%20the%20Merrier-%20Multi-Party%20Negotiation%20with%20Virtual%20Humans.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence},
volume = {2},
pages = {1970–1971},
abstract = {The goal of the Virtual Humans Project at the University of Southern California's Institute for Creative Technologies is to enrich virtual training environments with virtual humans, autonomous agents that support face-to-face interaction with trainees in a variety of roles, through bringing together many different areas of research including speech recognition, natural language understanding, dialogue management, cognitive modeling, emotion modeling, non-verbal behavior and speech and knowledge management. The demo at AAAI will focus on our work using virtual humans to train negotiation skills. Conference attendees will negotiate with a virtual human doctor and elder to try to move a clinic out of harm's way in single and multi-party negotiation scenarios using the latest iteration of our Virtual Humans framework. The user will use natural speech to talk to the embodied agents, who will respond in accordance with their internal task model and state. The characters will carry out a multi-party dialogue with verbal and non-verbal behavior. A video of a single-party version of the scenario was shown at AAAI-06. This new interactive demo introduces several new features, including multi-party negotiation, dynamically generated non-verbal behavior and a central ontology.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Robinson, Susan; Roque, Antonio; Vaswani, Ashish; Traum, David; Hernandez, Charles; Millspaugh, Bill
Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training Proceedings Article
In: 10th International Pragmatics Conference, Gothenburg, Sweden, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_evaluation_2007,
title = {Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training},
author = {Susan Robinson and Antonio Roque and Ashish Vaswani and David Traum and Charles Hernandez and Bill Millspaugh},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20a%20Spoken%20Dialogue%20System%20for%20Virtual%20Reality%20Call%20for%20Fire%20Training.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {10th International Pragmatics Conference},
address = {Gothenburg, Sweden},
abstract = {We present an evaluation of a spoken dialogue system that engages in dialogues with soldiers training in an immersive Call for Fire (CFF) simulation. We briefly describe aspects of the Joint Fires and Effects Trainer System, and the Radiobot-CFF dialogue system, which can engage in voice communications with a trainee in call for fire dialogues. An experiment is described to judge performance of the Radiobot-CFF system compared with human radio operators. Results show that while the current version of the system is not quite at human performance levels, it is already viable for training interaction and as an operator-controller aid.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Thagard, Paul; Ditto, Peter; Gratch, Jonathan; Marsella, Stacy C.; Westen, Drew
Emotional Cognition in the Real World Proceedings Article
In: Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society, Nashville, TN, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{thagard_emotional_2007,
title = {Emotional Cognition in the Real World},
author = {Paul Thagard and Peter Ditto and Jonathan Gratch and Stacy C. Marsella and Drew Westen},
url = {http://ict.usc.edu/pubs/Emotional%20Cognition%20in%20the%20Real%20World.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society},
address = {Nashville, TN},
abstract = {There is increasing appreciation in cognitive science of the impact of emotions on many kinds of thinking, from decision making to scientific discovery. This appreciation has developed in all the fields of cognitive science, including psychology, philosophy, artificial intelligence, linguistics, and anthropology. The purpose of the proposed symposium is to report and discuss new investigations of the impact of emotion on cognitive processes, in particular ones that are important in real-life situations. We will approach the practical importance of emotional cognition from a variety of disciplinary perspectives: social psychology (Ditto), clinical psychology (Westen), computer science (Gratch and Marsella), and philosophy and neuroscience (Thagard). In order to provide integration across these approaches, we will try to address a fundamental set of questions, including: 1. How do emotions interact with basic cognitive processes? 2. What are the positive contributions of emotions to various kinds of thinking in real-world situations? 3. How do emotions sometimes bias thinking in real-world situations? 4. How can understanding of the psychology and neuroscience of emotional cognition be used to improve the effectiveness of real-world thinking?},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Generalizing Semantic Role Annotations Across Syntactically Similar Verbs Proceedings Article
In: Proceedings of the 2007 Meeting of the Association for Computational Linguistics (ACL-07), Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_generalizing_2007,
title = {Generalizing Semantic Role Annotations Across Syntactically Similar Verbs},
author = {Andrew S. Gordon and Reid Swanson},
url = {http://ict.usc.edu/pubs/Generalizing%20Semantic%20Role%20Annotations%20Across%20Syntactically%20Similar%20Verbs.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the 2007 Meeting of the Association for Computational Linguistics (ACL-07)},
address = {Prague, Czech Republic},
abstract = {Large corpora of parsed sentences with semantic role labels (e.g. PropBank) provide training data for use in the creation of high-performance automatic semantic role labeling systems. Despite the size of these corpora, individual verbs (or role-sets) often have only a handful of instances in these corpora, and only a fraction of English verbs have even a single annotation. In this paper, we describe an approach for dealing with this sparse data problem, enabling accurate semantic role labeling for novel verbs (rolesets) with only a single training example. Our approach involves the identification of syntactically similar verbs found in PropBank, the alignment of arguments in their corresponding rolesets, and the use of their corresponding annotations in PropBank as surrogate training data.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations Proceedings Article
In: ACL 2007 Workshop on Embodied Language Processing, Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dynamic_2007,
title = {Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dynamic%20Movement%20and%20Positioning%20of%20Embodied%20Agents%20in%20Multiparty%20%20Conversations.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {ACL 2007 Workshop on Embodied Language Processing},
address = {Prague, Czech Republic},
abstract = {For embodied agents to engage in realistic multiparty conversation, they must stand in appropriate places with respect to other agents and the environment. When these factors change, for example when an agent joins a conversation, the agents must dynamically move to a new location and/or orientation to accommodate. This paper presents an algorithm for simulating the movement of agents based on observed human behavior using techniques developed for pedestrian movement in crowd simulations. We extend a previous group conversation simulation to include an agent motion algorithm. We examine several test cases and show how the simulation generates results that mirror real-life conversation settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Rizzo, Albert; Parsons, Thomas D.; Gratch, Jonathan; Swartout, William
A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, pp. 197–210, Washington D.C., 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007-1,
title = {A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills},
author = {Patrick G. Kenny and Albert Rizzo and Thomas D. Parsons and Jonathan Gratch and William Swartout},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Agent%20for%20Training%20Novice%20Therapist%20Clinical%20Interviewing%20Skills.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine},
volume = {4722},
pages = {197–210},
address = {Washington D.C.},
abstract = {Virtual Reality (VR) is rapidly evolving into a pragmatically usable technology for mental health (MH) applications. Over the last five years, the technology for creating virtual humans (VHs) has evolved to the point where they are no longer regarded as simple background characters, but rather can serve a functional interactional role. Our current project involves the construction of a natural language-capable virtual client named “Justin,” who evolved from a military negotiation training tool into a virtual therapy patient for training novice clinicians in the art of clinical interviewing with a resistant client. Justin portrays a 16-year old male with a conduct disorder who is being forced to participate in therapy by his family. The system uses a sophisticated natural language interface that allows novice clinicians to practice asking interview questions in an effort to create a positive therapeutic alliance with this very challenging virtual client. Herein we proffer a description of our iterative design process and outline our long-term vision.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Paek, Tim; Gandhe, Sudeep; Chickering, David Maxwel; Ju, Yun Cheng
Handling Out-of-Grammar Commands in Mobile Speech Interaction Using Backoff Filler Models Proceedings Article
In: Proceedings of the Workshop on Grammar-Based Approaches to Spoken Language Processing, pp. 33–40, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{paek_handling_2007,
title = {Handling Out-of-Grammar Commands in Mobile Speech Interaction Using Backoff Filler Models},
author = {Tim Paek and Sudeep Gandhe and David Maxwel Chickering and Yun Cheng Ju},
url = {http://ict.usc.edu/pubs/Handling%20Out-of-Grammar%20Commands%20in%20Mobile%20Speech%20Interaction%20Using%20Backoff%20Filler%20Models.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the Workshop on Grammar-Based Approaches to Spoken Language Processing},
pages = {33–40},
abstract = {In command and control (C&C) speech interaction, users interact by speaking commands or asking questions typically specified in a context-free grammar (CFG). Unfortunately, users often produce out-of-grammar (OOG) commands, which can result in misunderstanding or non-understanding. We explore a simple approach to handling OOG commands that involves generating a backoff grammar from any CFG using filler models, and utilizing that grammar for recognition whenever the CFG fails. Working within the memory footprint requirements of a mobile C&C product, applying the approach yielded a 35% relative reduction in semantic error rate for OOG commands. It also improved partial recognitions for enabling clarification dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Crooks, Valerie C.; Parsons, Thomas D.; Buckwalter, John Galen
Validation of the Cognitive Assessment of Later Life Status (CALLS) instrument: a computerized telephonic measure Journal Article
In: BMC Neurology, vol. 7, no. 10, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{crooks_validation_2007,
title = {Validation of the Cognitive Assessment of Later Life Status (CALLS) instrument: a computerized telephonic measure},
author = {Valerie C. Crooks and Thomas D. Parsons and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Validation%20of%20the%20Cognitive%20Assessment%20of%20Later%20Life%20Status%20(CALLS)%20instrument-%20a%20computerized%20telephonic%20measure.pdf},
doi = {10.1186/1471-2377-7-10},
year = {2007},
date = {2007-05-01},
journal = {BMC Neurology},
volume = {7},
number = {10},
abstract = {Background: Brief screening tests have been developed to measure cognitive performance and dementia, yet they measure limited cognitive domains and often lack construct validity. Neuropsychological assessments, while comprehensive, are too costly and time-consuming for epidemiological studies. This study's aim was to develop a psychometrically valid telephone administered test of cognitive function in aging. Methods: Using a sequential hierarchical strategy, each stage of test development did not proceed until specified criteria were met. The 30 minute Cognitive Assessment of Later Life Status (CALLS) measure and a 2.5 hour in-person neuropsychological assessment were conducted with a randomly selected sample of 211 participants 65 years and older that included equivalent distributions of men and women from ethnically diverse populations. Results: Overall Cronbach's coefficient alpha for the CALLS test was 0.81. A principal component analysis of the CALLS tests yielded five components. The CALLS total score was significantly correlated with four neuropsychological assessment components. Older age and having a high school education or less was significantly correlated with lower CALLS total scores. Females scored better overall than males. There were no score differences based on race. Conclusion: The CALLS test is a valid measure that provides a unique opportunity to reliably and efficiently study cognitive function in large populations.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Robertson, R. Kevin; Nakasujja, Noeline; Wong, Matthew; Musisi, Seggane; Katabira, Elly; Parsons, Thomas D.; Ronald, Allan; Sacktor, Ned
Pattern of neuropsychological performance among HIV positive patients in Uganda Journal Article
In: BMC Neurology, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_pattern_2007,
title = {Pattern of neuropsychological performance among HIV positive patients in Uganda},
author = {R. Kevin Robertson and Noeline Nakasujja and Matthew Wong and Seggane Musisi and Elly Katabira and Thomas D. Parsons and Allan Ronald and Ned Sacktor},
url = {http://ict.usc.edu/pubs/Pattern%20of%20neuropsychological%20performance%20among%20HIV%20positive%20patients%20in%20Uganda.pdf},
year = {2007},
date = {2007-04-01},
journal = {BMC Neurology},
abstract = {Few studies have examined cognitive functioning of HIV positive patients in sub-Saharan Africa. It cannot be assumed that HIV positive patients in Africa exhibit the same declines as patients in high-resource settings, since there are differences that may influence cognitive functioning including nutrition, history of concomitant disease, and varying HIV strains, among other possibilities. Part of the difficulty of specifying abnormalities in neuropsychological functioning among African HIV positive patients is that there are no readily available African normative databases. The purpose of the current study was to evaluate the pattern of neuropsychological performance in a sample of HIV positive patients in comparison to HIV negative control subjects in Uganda. Methods: The neuropsychological test scores of 110 HIV positive patients (WHO Stage 2},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
The Architectural Role of Emotion in Cognitive Systems Book Section
In: Integrated Models of Cognitive Systems, Oxford University Press, New York, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_architectural_2007,
title = {The Architectural Role of Emotion in Cognitive Systems},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/The%20Architectural%20Role%20of%20Emotion%20in%20Cognitive%20Systems.pdf},
year = {2007},
date = {2007-03-01},
booktitle = {Integrated Models of Cognitive Systems},
publisher = {Oxford University Press},
address = {New York},
abstract = {In this chapter, we will revive an old argument that theories of human emotion can give insight into the design and control of complex cognitive systems. In particular, we claim that appraisal theories of emotion provide essential insight into the influences of emotion over cognition and can help translate such findings into concrete guidance for the design of cognitive systems. Appraisal theory claims that emotion plays a central and functional role in sensing external events, characterizing them as opportunities or threats, and recruiting the cognitive, physical and social resources needed to adaptively respond. Further, because it argues for a close association between emotion and cognition, the theoretical claims of appraisal theory can be recast as a requirement specification for how to build a cognitive system. This specification asserts a set of judgments that must be supported in order to correctly interpret and respond to stimuli and provides a unifying framework for integrating these judgments into a coherent physical or social response. This chapter elaborates this argument in some detail based on our joint experience in building complex cognitive systems and computational models of emotion.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
de Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms Proceedings Article
In: Proceedings of the 1st International Conference on Computational Creativity (ICCC-X), pp. 248–257, 2007, ISBN: 978-989-96001-2-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{de_melo_evolving_2007,
title = {Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms},
author = {Celso M. de Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20through%20Color%20in%20Virtual%20Humans%20using%20Genetic%20Algorithms.pdf},
isbn = {978-989-96001-2-6},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the 1st International Conference on Computational Creativity (ICCC-X)},
pages = {248–257},
abstract = {For centuries artists have been exploring the formal elements of art (lines, space, mass, light, color, sound, etc.) to express emotions. This paper takes this insight to explore new forms of expression for virtual humans which go beyond the usual bodily, facial and vocal expression channels. In particular, the paper focuses on how to use color to influence the perception of emotions in virtual humans. First, a lighting model and filters are used to manipulate color. Next, an evolutionary model, based on genetic algorithms, is developed to learn novel associations between emotions and color. An experiment is then conducted where non-experts evolve mappings for joy and sadness, without being aware that genetic algorithms are used. In a second experiment, the mappings are analyzed with respect to their features and how general they are. Results indicate that the average fitness increases with each new generation, thus suggesting that people are succeeding in creating novel and useful mappings for the emotions. Moreover, the results show consistent differences between the evolved images of joy and the evolved images of sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zbylut, Michelle L.; Metcalf, Kimberly A.; Kim, Julia; Hill, Randall W.; Rocher, Scott
Army Excellence in Leadership (AXL): A Multimedia Approach to Building Tacit Knowledge and Cultural Reasoning Technical Report
no. Technical Report 1194, 2007.
Abstract | Links | BibTeX | Tags:
@techreport{zbylut_army_2007,
title = {Army Excellence in Leadership (AXL): A Multimedia Approach to Building Tacit Knowledge and Cultural Reasoning},
author = {Michelle L. Zbylut and Kimberly A. Metcalf and Julia Kim and Randall W. Hill and Scott Rocher},
url = {http://ict.usc.edu/pubs/Army%20Excellence%20in%20Leadership%20(AXL)-%20A%20Multimedia%20Approach%20to%20Building%20Tacit%20Knowledge%20and%20Cultural%20Reasoning.pdf},
year = {2007},
date = {2007-01-01},
number = {Technical Report 1194},
abstract = {This report presents findings from a preliminary examination of the Army Excellence in Leadership (AXL) system, a leader intervention that targets the development of tacit leadership knowledge and cultural awareness in junior Army officers. Fifty-five junior officers interacted with a pilot version of a cultural awareness module from the AXL system. Results indicated that the AXL approach resulted in improvements in leader judgment on a forced-choice measure. Furthermore, results indicated that cultural issues were more salient to leaders after completion of the cultural awareness module. Reactions to training were generally positive, with officers indicating that the cultural awareness module was useful and stimulated thought. Additionally, this investigation explored the relationship between affect and learning and found that emotional responses to the AXL system were related to learning-relevant variables, such as judgment scores and officer reports that they could apply the training to their activities as a leader.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Buckwalter, John Galen; Geiger, A. M.; Parsons, Thomas D.; Handler, J.; Howes, J.; Lehmer, R. R.
Cognitive Effects of Short-term Use of Raloxifene: A Randomized Clinical Trial Journal Article
In: International Journal of Neuroscience, vol. 117, pp. 1579–1590, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{buckwalter_cognitive_2007,
title = {Cognitive Effects of Short-term Use of Raloxifene: A Randomized Clinical Trial},
author = {John Galen Buckwalter and A. M. Geiger and Thomas D. Parsons and J. Handler and J. Howes and R. R. Lehmer},
url = {http://ict.usc.edu/pubs/Cognitive%20Effects%20of%20Short-term%20Use%20of%20Raloxifene-%20A%20Randomized%20Clinical%20Trial.pdf},
year = {2007},
date = {2007-01-01},
journal = {International Journal of Neuroscience},
volume = {117},
pages = {1579–1590},
abstract = {Two questions regarding findings from the Women's Health Initiative are (1) What is the effect of various hormonal regimens, including selective estrogen receptor modulators? and (2) Is the negative effect on cognitive functioning related to the older age (65+ years) of the women? This study addresses these two questions in a short-term randomized trial of the effects of raloxifene versus alendronate on cognition. The study found only one significant interaction where the raloxifene and alendronate group changed differently across the two testing occasions. Hence, raloxifene does not have any impact, positive or negative, on short-term cognitive functioning when compared to alendronate.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Graap, Ken; McLay, Robert N.; Perlman, Karen; Rothbaum, Barbara O.; Reger, Greg; Parsons, Thomas D.; Difede, JoAnn; Pair, Jarrell
Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder Journal Article
In: Virtual Rehabilitation, vol. 27, pp. 124–130, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_virtual_2007,
title = {Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder},
author = {Albert Rizzo and Ken Graap and Robert N. McLay and Karen Perlman and Barbara O. Rothbaum and Greg Reger and Thomas D. Parsons and JoAnn Difede and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Virtual%20Iraq-%20Initial%20Case%20Reports%20from%20a%20VR%20Exposure%20Therapy%20Application%20for%20Combat-Related%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2007},
date = {2007-01-01},
journal = {Virtual Rehabilitation},
volume = {27},
pages = {124–130},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale and brief description of a Virtual Iraq PTSD VR therapy application and present initial findings from two successfully treated patients. The VR treatment environment was created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far, Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center and the results from two successfully treated patients are presented along with a delineation of our future plans for research and clinical care using this application.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Rizzo, Albert; Bamattre, Jacob; Brennan, John
Virtual Reality Cognitive Performance Assessment Test Journal Article
In: Annual Review of CyberTherapy and Telemedicine, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_virtual_2007,
title = {Virtual Reality Cognitive Performance Assessment Test},
author = {Thomas D. Parsons and Albert Rizzo and Jacob Bamattre and John Brennan},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Cognitive%20Performance%20Assessment%20Test.pdf},
year = {2007},
date = {2007-01-01},
journal = {Annual Review of CyberTherapy and Telemedicine},
abstract = {Virtual Reality Cognitive Performance Assessment Test (VRCPAT) is a virtual environment based measure of learning and memory. We examined convergent and discriminant validity and hypothesized that the VRCPAT’s Total Learning and Memory scores would correlate with other neuropsychological measures involving learning and memory, but not with measures involving potential confounds (i.e., Executive Functions; Attention; and Processing Speed). Using a sequential hierarchical strategy, each stage of test development did not proceed until specified criteria were met. The 15 minute VRCPAT battery and a 1.5 hour in-person neuropsychological assessment were conducted with a randomly selected sample of 20 healthy adults that included equivalent distributions of men and women from ethnically diverse populations. Results supported both convergent and discriminant validity. That is, findings suggest that the VRCPAT measures a capacity that is 1) consistent with that assessed by traditional paper and pencil measures involving learning and memory; and 2) inconsistent with that assessed by traditional paper and pencil measures assessing neurocognitive domains traditionally assumed to be other than learning and memory. We conclude that the VRCPAT is a valid test that provides a unique opportunity to reliably and efficiently study memory function within an ecologically valid environment.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Leuski, Anton; Rizzo, Albert
Virtual Patients for Clinical Therapist Skills Training Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 197–210, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007,
title = {Virtual Patients for Clinical Therapist Skills Training},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Anton Leuski and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Patients%20for%20Clinical%20Therapist%20Skills%20Training.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {197–210},
address = {Paris, France},
abstract = {Virtual humans offer an exciting and powerful potential for rich interactive experiences. Fully embodied virtual humans are growing in capability, ease, and utility. As a result, they present an opportunity for expanding research into burgeoning virtual patient medical applications. In this paper we consider the ways in which one may go about building and applying virtual human technology to the virtual patient domain. Specifically we aim to show that virtual human technology may be used to help develop the interviewing and diagnostics skills of developing clinicians. Herein we proffer a description of our iterative design process and preliminary results to show that virtual patients may be a useful adjunct to psychotherapy education.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Wang, Ning; Gerten, Jillian; Fast, Edward; Duffy, Robin
Creating Rapport with Virtual Agents Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 125–128, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_creating_2007,
title = {Creating Rapport with Virtual Agents},
author = {Jonathan Gratch and Ning Wang and Jillian Gerten and Edward Fast and Robin Duffy},
url = {http://ict.usc.edu/pubs/Creating%20Rapport%20with%20Virtual%20Agents.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {125–128},
address = {Paris, France},
abstract = {Recent research has established the potential for virtual characters to establish rapport with humans through simple contingent nonverbal behaviors. We hypothesized that the contingency, not just the frequency of positive feedback is crucial when it comes to creating rapport. The primary goal in this study was evaluative: can an agent generate behavior that engenders feelings of rapport in human speakers and how does this compare to human generated feedback? A secondary goal was to answer the question: Is contingency (as opposed to frequency) of agent feedback crucial when it comes to creating feelings of rapport? Results suggest that contingency matters when it comes to creating rapport and that agent generated behavior was as good as human listeners in creating rapport. A "virtual human listener" condition performed worse than other conditions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yeh, Shih-Ching; Rizzo, Albert; McLaughlin, Margaret; Parsons, Thomas D.
VR Enhanced Upper Extremity Motor Training for Post-Stroke Rehabilitation: Task Design, Clinical Experiment and Visualization on Performance and Progress Journal Article
In: Studies in Health Technology and Informatics, vol. 125, pp. 506–511, 2007.
Links | BibTeX | Tags: MedVR
@article{yeh_vr_2007,
title = {VR Enhanced Upper Extremity Motor Training for Post-Stroke Rehabilitation: Task Design, Clinical Experiment and Visualization on Performance and Progress},
author = {Shih-Ching Yeh and Albert Rizzo and Margaret McLaughlin and Thomas D. Parsons},
url = {http://ict.usc.edu/pubs/VR%20Enhanced%20Upper%20Extremity%20Motor%20Training%20for%20Post-Stroke%20Rehabilitation-%20Task%20Design,%20Clinical%20Experiment%20and%20Visualization%20on%20Performance%20and%20Progress.pdf},
year = {2007},
date = {2007-01-01},
journal = {Studies in Health Technology and Informatics},
volume = {125},
pages = {506–511},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Bowerly, Todd; Buckwalter, John Galen; Rizzo, Albert
A controlled clinical comparison of attention performance in children with ADHD in a virtual reality classroom compared to standard neuropsychological methods Journal Article
In: Child Neuropsychology, vol. 13, pp. 363–381, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_controlled_2007,
title = {A controlled clinical comparison of attention performance in children with ADHD in a virtual reality classroom compared to standard neuropsychological methods},
author = {Thomas D. Parsons and Todd Bowerly and John Galen Buckwalter and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20CONTROLLED%20CLINICAL%20COMPARISON%20OF%20ATTENTION%20PERFORMANCE%20IN%20CHILDREN%20WITH%20ADHD%20IN%20A%20VIRTUAL%20REALITY%20CLASSROOM%20COMPARED%20TO%20STANDARD%20NEUROPSYCHOLOGICAL%20METHODS.pdf},
doi = {10.1080/13825580600943473},
year = {2007},
date = {2007-01-01},
journal = {Child Neuropsychology},
volume = {13},
pages = {363–381},
abstract = {In this initial pilot study, a controlled clinical comparison was made of attention performance in children with attention deficit-hyperactivity disorder (ADHD) in a virtual reality (VR) classroom. Ten boys diagnosed with ADHD and ten normal control boys participated in the study. Groups did not significantly differ in mean age, grade level, ethnicity, or handedness. No participants reported simulator sickness following VR exposure. Children with ADHD exhibited more omission errors, commission errors, and overall body movement than normal control children in the VR classroom. Children with ADHD were more impacted by distraction in the VR classroom. VR classroom measures were correlated with traditional ADHD assessment tools and the flatscreen CPT. Of note, the small sample size incorporated in each group and higher WISC-III scores of normal controls might have some bearing on the overall interpretation of results. These data suggested that the Virtual Classroom had good potential for controlled performance assessment within an ecologically valid environment and appeared to parse out significant effects due to the presence of distraction stimuli.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad
Metacognition and the Development of Intercultural Competence Proceedings Article
In: Proceedings of the Workshop on Metacognition and Self-Regulated Learning in Intelligent Tutoring Systems at the 13th International Conference on Artificial Intelligence in Education (AIED), pp. 23–32, Marina del Rey, CA, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_metacognition_2007,
title = {Metacognition and the Development of Intercultural Competence},
author = {H. Chad Lane},
url = {http://ict.usc.edu/pubs/Metacognition%20and%20the%20Development%20of%20Intercultural%20Competence.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the Workshop on Metacognition and Self-Regulated Learning in Intelligent Tutoring Systems at the 13th International Conference on Artificial Intelligence in Education (AIED)},
pages = {23–32},
address = {Marina del Rey, CA},
abstract = {We argue that metacognition is a critical component in the development of intercultural competence by highlighting the importance of supporting a learner's self-assessment, self-monitoring, predictive, planning and reflection skills. We also survey several modern immersive cultural learning environments and discuss the role intelligent tutoring and experience management techniques can play to support these metacognitive demands. Techniques for adapting the behaviors of virtual humans to promote cultural learning are discussed, as well as explicit approaches to feedback. We conclude with several suggestions for future research, including the use of existing intercultural development metrics for evaluating learning in immersive environments and to conduct more studies of the use of implicit and explicit feedback to guide learning and establish optimal conditions for acquiring intercultural competence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy in negotiation Journal Article
In: Group Decision and Negotiation, vol. 16, pp. 61–76, 2007, ISSN: 0926-2644.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{martinovski_rejection_2007,
title = {Rejection of empathy in negotiation},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20in%20negotiation.pdf},
issn = {0926-2644},
year = {2007},
date = {2007-01-01},
journal = {Group Decision and Negotiation},
volume = {16},
pages = {61–76},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected, which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires, whereas others have mainly strategic functions, gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation, not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Miller, Karen J.; Parsons, Thomas D.; Whybrow, Peter C.; Herle, Katja; Rasgon, Natalie; Herle, Andre; Martinez, Dorothy; Silverman, Dan H.; Bauer, Michael
Verbal Memory Retrieval Deficits Associated With Untreated Hypothyroidism Journal Article
In: Journal of Neuropsychiatry and Clinical Neurosciences, vol. 19, no. 2, pp. 132–136, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{miller_verbal_2007,
title = {Verbal Memory Retrieval Deficits Associated With Untreated Hypothyroidism},
author = {Karen J. Miller and Thomas D. Parsons and Peter C. Whybrow and Katja Herle and Natalie Rasgon and Andre Herle and Dorothy Martinez and Dan H. Silverman and Michael Bauer},
url = {http://ict.usc.edu/pubs/Verbal%20Memory%20Retrieval%20Deficits%20Associated%20With%20Untreated%20Hypothyroidism.pdf},
year = {2007},
date = {2007-01-01},
journal = {Journal of Neuropsychiatry and Clinical Neurosciences},
volume = {19},
number = {2},
pages = {132–136},
abstract = {The effects of inadequate thyroid hormone availability to the brain on adult cognitive function are poorly understood. This study assessed the effects of hypothyroidism on cognitive function using a standard neuropsychological battery in 14 patients suffering from untreated hypothyroidism and complaining of subjective cognitive difficulties in comparison with 10 age-matched healthy comparison subjects. Significant differences between groups were limited to verbal memory retrieval as measured by the California Verbal Learning Test (CVLT). On short delay free recall, long delay free recall, and long delay cued recall, significant differences remained between groups despite the limited statistical power of this study. There were no significant results found between groups on attentional or nonverbal tasks. Results suggest that hypothyroid-related memory deficits are not attributable to an attentional deficit but rather to specific retrieval deficits.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Elson, David K.; Riedl, Mark O.
A Lightweight Intelligent Virtual Cinematography System for Machinima Production Proceedings Article
In: Proceedings of the 3rd Annual Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE 07), Defense Technical Information Center, Palo Alto, CA, 2007.
@inproceedings{elson_lightweight_2007,
title = {A Lightweight Intelligent Virtual Cinematography System for Machinima Production},
author = {David K. Elson and Mark O. Riedl},
url = {http://ict.usc.edu/pubs/A%20Lightweight%20Intelligent%20Virtual%20Cinematography%20System%20for%20Machinima%20Production.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the 3rd Annual Conference on Artificial Intelligence and Interactive Digital Entertainment (AIIDE 07)},
publisher = {Defense Technical Information Center},
address = {Palo Alto, CA},
abstract = {Machinima is a low-cost alternative to full production filmmaking. However, creating quality cinematic visualizations with existing machinima techniques still requires a high degree of talent and effort. We introduce a lightweight artificial intelligence system, Cambot, that can be used to assist in machinima production. Cambot takes a script as input and produces a cinematic visualization. Unlike other virtual cinematography systems, Cambot favors an offline algorithm coupled with an extensible library of specific modular and reusable facets of cinematic knowledge. One of the advantages of this approach to virtual cinematography is a tight coordination between the positions and movements of the camera and the actors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
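The offline shot-planning idea in the abstract above reduces to a small loop: walk the script beat by beat and keep the first shot template in an extensible library whose constraints the beat satisfies. The sketch below is a minimal illustration of that idea under a hypothetical beat/template representation; none of these names are Cambot's actual API.

def plan_shots(script, shot_library):
    # For each script beat, scan the library of reusable shot
    # templates and keep the first one whose test the beat passes.
    plan = []
    for beat in script:
        for shot in shot_library:
            if shot["applies"](beat):
                plan.append((beat["line"], shot["name"]))
                break
    return plan

library = [
    {"name": "close-up", "applies": lambda b: b["emotional"]},
    {"name": "two-shot", "applies": lambda b: len(b["actors"]) == 2},
    {"name": "wide",     "applies": lambda b: True},  # fallback
]
script = [
    {"line": "I can't believe you came.", "actors": ["A"], "emotional": True},
    {"line": "We need to talk.", "actors": ["A", "B"], "emotional": False},
]
print(plan_shots(script, library))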
Ma, Wan-Chun; Hawkins, Tim; Peers, Pieter; Chabert, Charles-Felix; Weiss, Malte; Debevec, Paul
Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination Proceedings Article
In: Kautz, Jan; Pattanaik, Sumanta (Ed.): Eurographics Symposium on Rendering, 2007.
@inproceedings{ma_rapid_2007,
title = {Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination},
author = {Wan-Chun Ma and Tim Hawkins and Pieter Peers and Charles-Felix Chabert and Malte Weiss and Paul Debevec},
editor = {Jan Kautz and Sumanta Pattanaik},
url = {http://ict.usc.edu/pubs/Rapid%20Acquisition%20of%20Specular%20and%20Diffuse%20Normal%20Maps%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Eurographics Symposium on Rendering},
abstract = {We estimate surface normal maps of an object from either its diffuse or specular reflectance using four spherical gradient illumination patterns. In contrast to traditional photometric stereo, the spherical patterns allow normals to be estimated simultaneously from any number of viewpoints. We present two polarized lighting techniques that allow the diffuse and specular normal maps of an object to be measured independently. For scattering materials, we show that the specular normal maps yield the best record of detailed surface shape while the diffuse normals deviate from the true surface normal due to subsurface scattering, and that this effect is dependent on wavelength. We show several applications of this acquisition technique. First, we capture normal maps of a facial performance simultaneously from several viewing positions using time-multiplexed illumination. Second, we show that high-resolution normal maps based on the specular component can be used with structured light 3D scanning to quickly acquire high-resolution facial surface geometry using off-the-shelf digital still cameras. Finally, we present a real-time shading model that uses independently estimated normal maps for the specular and diffuse color channels to reproduce some of the perceptually important effects of subsurface scattering.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
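The diffuse-normal estimator underlying the abstract above has a compact core: under a linear ramp pattern along each world axis plus a constant full-on pattern, the per-pixel ratio of a gradient-lit image to the fully lit image recovers one component of the (Lambertian) surface normal. A minimal sketch of that estimator follows, with hypothetical function and variable names; the paper's polarized handling of the specular component is not reproduced here.

import numpy as np

def normals_from_gradients(i_x, i_y, i_z, i_full, eps=1e-6):
    # Under a ramp pattern P(w) = (w_axis + 1) / 2 on the sphere,
    # a Lambertian pixel satisfies I_axis / I_full = (n_axis + 1) / 2,
    # so each normal component is 2 * I_axis / I_full - 1.
    denom = np.maximum(i_full, eps)
    n = np.stack([2.0 * i_x / denom - 1.0,
                  2.0 * i_y / denom - 1.0,
                  2.0 * i_z / denom - 1.0], axis=-1)
    # Renormalize to unit length; degenerate pixels fall back to +z.
    norm = np.linalg.norm(n, axis=-1, keepdims=True)
    return np.where(norm > eps, n / np.maximum(norm, eps),
                    np.array([0.0, 0.0, 1.0]))

# Synthetic 2x2 check: half-gray x/y ramp images and a fully lit
# z ramp should yield normals pointing straight at +z.
i_full = np.ones((2, 2))
i_x = np.full((2, 2), 0.5)
i_y = np.full((2, 2), 0.5)
i_z = np.ones((2, 2))
print(normals_from_gradients(i_x, i_y, i_z, i_full))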
Lane, H. Chad; Core, Mark; Gomboc, Dave; Karnavat, Ashish; Rosenberg, Milton
Intelligent Tutoring for Interpersonal and Intercultural Skills Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), 2007.
@inproceedings{lane_intelligent_2007,
title = {Intelligent Tutoring for Interpersonal and Intercultural Skills},
author = {H. Chad Lane and Mark Core and Dave Gomboc and Ashish Karnavat and Milton Rosenberg},
url = {http://ict.usc.edu/pubs/Intelligent%20Tutoring%20for%20Interpersonal%20and%20Intercultural%20Skills.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
abstract = {We describe some key issues involved in building an intelligent tutoring system for the ill-defined domain of interpersonal and intercultural skill acquisition. We discuss the consideration of mixed-result actions (actions with pros and cons), categories of actions (e.g. required steps vs. rules of thumb), the role of narrative, and reflective tutoring, among other topics. We present these ideas in the context of our work on an intelligent tutor for ELECT BiLAT, a game-based system to teach cultural awareness and negotiation skills for bilateral engagements. The tutor provides guidance in two forms: (1) as a coach that gives hints and feedback during an engagement with a virtual character, and (2) during an after-action review to help the learner reflect on their choices. Learner activities are mapped to learning objectives, which include whether the actions represent positive or negative evidence of learning. These underlie an expert model, student model, and models of coaching and reflective tutoring that support the learner. We describe several other cultural and interpersonal training systems that situate learners in goal based social contexts that include interaction with virtual characters and automated guidance. Finally, our future work includes evaluations of learning, expansion of the coach and reflective tutoring strategies, and integration of deeper knowledge-based resources that capture more nuanced cultural aspects of interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
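The mapping the abstract above describes, from learner actions to learning objectives carrying positive or negative evidence, is at bottom a bookkeeping step. A minimal sketch under a hypothetical action-to-objective table (illustrative only, not the BiLAT tutor's actual models):

from collections import defaultdict

def update_student_model(model, action, evidence_map):
    # Each action maps to (objective, evidence) pairs, where
    # evidence is +1 (positive) or -1 (negative); accumulate.
    for objective, evidence in evidence_map.get(action, []):
        model[objective] += evidence
    return model

evidence_map = {"greet_host_respectfully": [("rapport", +1)],
                "demand_concession_first": [("negotiation_pacing", -1)]}
model = defaultdict(int)
update_student_model(model, "greet_host_respectfully", evidence_map)
print(dict(model))  # {'rapport': 1}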
Lee, Jina; Marsella, Stacy C.; Traum, David; Gratch, Jonathan; Lance, Brent
The Rickel Gaze Model: A Window on the Mind of a Virtual Human Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 296–303, Paris, France, 2007.
@inproceedings{lee_rickel_2007,
title = {The Rickel Gaze Model: A Window on the Mind of a Virtual Human},
author = {Jina Lee and Stacy C. Marsella and David Traum and Jonathan Gratch and Brent Lance},
url = {http://ict.usc.edu/pubs/The%20Rickel%20Gaze%20Model-%20A%20Window%20on%20the%20Mind%20of%20a%20Virtual%20Human.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {296–303},
address = {Paris, France},
abstract = {Gaze plays a large number of cognitive, communicative and affective roles in face-to-face human interaction. To build a believable virtual human, it is imperative to construct a gaze model that generates realistic gaze behaviors. However, it is not enough to merely imitate a person's eye movements. The gaze behaviors should reflect the internal states of the virtual human, and users should be able to infer those states by observing the behaviors. In this paper, we present a gaze model driven by cognitive operations: the model processes the virtual human's reasoning, dialog management, and goals to generate behaviors that reflect the agent's inner thoughts. It has been implemented in our virtual human system and operates in real time. The gaze model introduced in this paper was originally designed and developed by Jeff Rickel but has since been extended by the authors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
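One minimal way to picture "gaze driven by cognitive operations" is a rule table from internal state to gaze behavior, so that an observer can read the agent's state off what it looks at. The states and behaviors below are hypothetical placeholders, not the Rickel model's actual categories.

def select_gaze(cognitive_state):
    # Map the agent's current cognitive operation to a gaze act.
    rules = {
        "planning_utterance": "avert_gaze_upward",   # thinking
        "listening":          "fixate_speaker",      # attending
        "yielding_turn":      "fixate_addressee",    # hand over turn
        "monitoring_scene":   "scan_environment",
    }
    return rules.get(cognitive_state, "idle_gaze")

print(select_gaze("listening"))  # fixate_speaker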
Parsons, Thomas D.; Rogers, Steven A.; Hall, Colin D.; Robertson, R. Kevin
Motor Based Assessment of Neurocognitive Functioning in Resource-Limited International Settings Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 29, pp. 59–66, 2007.
@article{parsons_motor_2007,
title = {Motor Based Assessment of Neurocognitive Functioning in Resource-Limited International Settings},
author = {Thomas D. Parsons and Steven A. Rogers and Colin D. Hall and R. Kevin Robertson},
url = {http://ict.usc.edu/pubs/Motor%20based%20assessment%20of%20neurocognitive%20functioning%20in%20resource-limited%20Iinternational%20settings.pdf},
year = {2007},
date = {2007-01-01},
journal = {Journal of Clinical and Experimental Neuropsychology},
volume = {29},
pages = {59–66},
abstract = {This study compared the variance accounted for by neuropsychological tests in a brief motor battery and in a comprehensive neuropsychological battery. 327 HIV+ subjects received a comprehensive cognitive battery and a shorter battery (Timed Gait, Grooved Pegboard, and Finger Tapping). A significant correlation existed between the motor component tests and the more comprehensive battery (52% of variance). Adding Digit Symbol and Trail Making increased the amount of variance accounted for (73%). Motor battery sensitivity to impairment diagnosis was 0.79 and specificity was 0.76. A motor battery may have broader utility to diagnose and monitor HIV-related neurocognitive disorders in international settings.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
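The reported sensitivity (0.79) and specificity (0.76) are standard classification metrics, and the arithmetic is worth making explicit: sensitivity is the fraction of truly impaired subjects the battery flags, specificity the fraction of unimpaired subjects it clears. A toy computation (illustrative only, not the study's analysis):

def sensitivity_specificity(predicted, actual):
    # predicted/actual are parallel booleans, True = impaired.
    tp = sum(p and a for p, a in zip(predicted, actual))
    tn = sum(not p and not a for p, a in zip(predicted, actual))
    fn = sum(not p and a for p, a in zip(predicted, actual))
    fp = sum(p and not a for p, a in zip(predicted, actual))
    return tp / (tp + fn), tn / (tn + fp)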
Gratch, Jonathan; Wang, Ning; Okhmatovskaia, Anna; Lamothe, Francois; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Can virtual humans be more engaging than real ones? Proceedings Article
In: Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments, pp. 286–297, Beijing, China, 2007.
@inproceedings{gratch_can_2007,
title = {Can virtual humans be more engaging than real ones?},
author = {Jonathan Gratch and Ning Wang and Anna Okhmatovskaia and Francois Lamothe and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Can%20virtual%20humans%20be%20more%20engaging%20than%20real%20ones.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments},
pages = {286–297},
address = {Beijing, China},
abstract = {Emotional bonds don't arise from a simple exchange of facial displays, but often emerge through the dynamic give and take of face-to-face interactions. This article explores the phenomenon of rapport, a feeling of connectedness that seems to arise from rapid and contingent positive feedback between partners and is often associated with socio-emotional processes. Rapport has been argued to lead to communicative efficiency, better learning outcomes, improved acceptance of medical advice and successful negotiations. We provide experimental evidence that a simple virtual character that provides positive listening feedback can induce stronger rapport-like effects than face-to-face communication between human partners. Specifically, this interaction can be more engaging to storytellers than speaking to a human audience, as measured by the length and content of their stories.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
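The "rapid and contingent positive feedback" such a listening agent provides can be pictured as rules firing on prosodic features of the speaker, e.g. a head nod when a pause follows lowered pitch. The feature names below are assumptions for illustration; the paper's actual listening-feedback rules may differ.

def backchannel_events(feature_stream):
    # feature_stream yields (time, features) pairs; emit a nod
    # whenever a pause co-occurs with recently lowered pitch.
    events = []
    for t, f in feature_stream:
        if f.get("pause") and f.get("pitch_lowered"):
            events.append((t, "nod"))
    return events

stream = [(1.2, {"pause": False, "pitch_lowered": True}),
          (2.8, {"pause": True, "pitch_lowered": True})]
print(backchannel_events(stream))  # [(2.8, 'nod')]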
Iudicello, Jennifer E.; Woods, Steven Paul; Parsons, Thomas D.; Moran, Lisa M.; Carey, Catherine L.; Grant, Igor
Verbal fluency in HIV infection: A meta-analytic review Journal Article
In: Journal of the International Neuropsychological Society, vol. 13, pp. 183–189, 2007.
@article{iudicello_verbal_2007,
title = {Verbal fluency in HIV infection: A meta-analytic review},
author = {Jennifer E. Iudicello and Steven Paul Woods and Thomas D. Parsons and Lisa M. Moran and Catherine L. Carey and Igor Grant},
url = {http://ict.usc.edu/pubs/Verbal%20fluency%20in%20HIV%20infection-%20A%20meta-analytic%20review.pdf},
doi = {10.1017/S1355617707070221},
year = {2007},
date = {2007-01-01},
journal = {Journal of the International Neuropsychological Society},
volume = {13},
pages = {183–189},
abstract = {Given the largely prefrontostriatal neuropathogenesis of HIV-associated neurobehavioral deficits, it is often presumed that HIV infection leads to greater impairment on letter versus category fluency. A meta-analysis of the HIV verbal fluency literature was conducted (k = 37, n = 7110) to assess this hypothesis and revealed generally small effect sizes for both letter and category fluency, which increased in magnitude with advancing HIV disease severity. Across all studies, the mean effect size of category fluency was slightly larger than that of letter fluency. However, the discrepancy between category and letter fluency dissipated in a more conservative analysis of only those studies that included both tests. Thus, HIV-associated impairments in letter and category fluency are of similar magnitude, suggesting that mild word generation deficits are evident in HIV, regardless of whether traditional letter or semantic cues are used to guide the word search and retrieval process.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
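The core aggregation step of such a meta-analysis is a weighted mean of per-study effect sizes. A minimal sketch using sample-size weights and made-up numbers (inverse-variance weighting is the more rigorous standard, and none of these figures come from the paper):

def mean_effect_size(studies):
    # studies: list of (cohens_d, n) pairs; weight d by sample size.
    total_n = sum(n for _, n in studies)
    return sum(d * n for d, n in studies) / total_n

letter = [(0.45, 120), (0.30, 80), (0.52, 60)]     # hypothetical
category = [(0.50, 120), (0.41, 80), (0.58, 60)]   # hypothetical
print(mean_effect_size(letter), mean_effect_size(category))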
Robertson, R. Kevin; Smurzynski, Marlene; Parsons, Thomas D.; Wu, Kunling; Bosch, Ronald J.; Wu, Julia; McArthur, Justin C.; Collier, Ann C.; Evans, Scott R.; Ellis, Ron J.
The Prevalence and Incidence of Neurocognitive Impairment in the HAART Era Journal Article
In: AIDS, vol. 21, pp. 1915–1921, 2007, ISSN: 0269-9370.
@article{robertson_prevalence_2007,
title = {The Prevalence and Incidence of Neurocognitive Impairment in the HAART Era},
author = {R. Kevin Robertson and Marlene Smurzynski and Thomas D. Parsons and Kunling Wu and Ronald J. Bosch and Julia Wu and Justin C. McArthur and Ann C. Collier and Scott R. Evans and Ron J. Ellis},
url = {http://ict.usc.edu/pubs/The%20prevalence%20and%20incidence%20of%20neurocognitive%20impairment%20in%20the%20HAART%20era.pdf},
issn = {0269-9370},
year = {2007},
date = {2007-01-01},
journal = {AIDS},
volume = {21},
pages = {1915–1921},
abstract = {Objectives: HAART suppresses HIV viral replication and restores immune function. The effects of HAART on neurological disease are less well understood. The aim of this study was to assess the prevalence and incidence of neurocognitive impairment in individuals who initiated HAART as part of an AIDS clinical trial. Design: A prospective cohort study of HIV-positive patients enrolled in randomized antiretroviral trials, the AIDS Clinical Trials Group (ACTG) Longitudinal Linked Randomized Trials (ALLRT) study. Methods: We examined the association between baseline and demographic characteristics and neurocognitive impairment among 1160 subjects enrolled in the ALLRT study. Results: A history of immunosuppression (nadir CD4 cell count < 200 cells/ml) was associated with an increase in prevalent neurocognitive impairment. There were no significant virological and immunological predictors of incident neurocognitive impairment. Current immune status (low CD4 cell count) was associated with sustained prevalent impairment. Conclusion: The association of previous advanced immunosuppression with prevalent and sustained impairment suggests that there is a non-reversible component of neural injury that tracks with a history of disease progression. The association of sustained impairment with worse current immune status (low CD4 cell count) suggests that restoring immunocompetence increases the likelihood of neurocognitive recovery. Finally, the lack of association between incident neurocognitive impairment and virological and immunological indicators implies that neural injury continues in some patients regardless of the success of antiretroviral therapy on these laboratory measures.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lamond, Bruce; Peers, Pieter; Debevec, Paul
Fast Image-based Separation of Diffuse and Specular Reflections Technical Report
University of Southern California Institute for Creative Technologies, no. ICT TR 02 2007, 2007.
@techreport{lamond_fast_2007,
title = {Fast Image-based Separation of Diffuse and Specular Reflections},
author = {Bruce Lamond and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-02-2007.pdf},
year = {2007},
date = {2007-01-01},
number = {ICT TR 02 2007},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a novel image-based method for separating diffuse and specular reflections of real objects under distant environmental illumination. By illuminating a scene with only four high frequency illumination patterns, the specular and diffuse reflections can be separated by computing the maximum and minimum observed pixel values. Furthermore, we show that our method can be extended to separate diffuse and specular components under image-based environmental illumination. Applications range from image-based modeling of reflectance properties to improved normal and geometry acquisition.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
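The max/min separation the abstract describes has a very small core: across shifted high-frequency patterns, a pixel's minimum observed value is roughly free of the sharp specular lobe (the mirror direction is dark in some pattern), while its maximum contains it. A rough sketch under those assumptions, with hypothetical names; the paper's environmental-illumination extension is not covered:

import numpy as np

def separate_max_min(images):
    # images: stack of shape (k, h, w), one image per shifted
    # half-on/half-off illumination pattern.
    i_max = images.max(axis=0)
    i_min = images.min(axis=0)
    specular = i_max - i_min   # lobe present at max, absent at min
    diffuse = 2.0 * i_min      # min still sees ~half the diffuse light
    return diffuse, specular

The factor of two assumes complementary patterns that each cover about half the sphere of incident directions, so the diffuse term stays near half its full-sphere value in every image.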
Gandhe, Sudeep; Traum, David
First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus Proceedings Article
In: 5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems, Hyderabad, India, 2007.
@inproceedings{gandhe_first_2007,
title = {First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20towards%20Dialogue%20Modelling%20from%20an%20Un-annotated%20Human-Human%20Corpus.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems},
address = {Hyderabad, India},
abstract = {Virtual human characters equipped with natural language dialogue capability have proved useful in many fields, such as simulation training and interactive games. Generally, behind such dialogue managers lies a complex, knowledge-rich, rule-based system. Building such a system involves meticulous annotation of data and hand-authoring of rules. In this paper we build a statistical dialogue model from a role-play and Wizard-of-Oz dialogue corpus with virtually no annotation. We compare these methods with the traditional approaches. We have evaluated these systems for perceived appropriateness of response, and the results are presented here.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
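A minimal version of a statistical dialogue model learned from un-annotated adjacent turn pairs is nearest-neighbor response retrieval: match the user's turn against corpus utterances by surface similarity and return the reply that followed the best match. The sketch below is one plausible reading of that idea, not the authors' exact models.

import math
from collections import Counter

def _vec(text):
    return Counter(text.lower().split())

def _cosine(a, b):
    # Cosine similarity between two bag-of-words vectors.
    num = sum(a[w] * b[w] for w in set(a) & set(b))
    den = math.sqrt(sum(v * v for v in a.values())) * \
          math.sqrt(sum(v * v for v in b.values()))
    return num / den if den else 0.0

def retrieve_response(user_turn, corpus_pairs):
    # corpus_pairs: (utterance, reply) pairs read off the raw
    # transcript; return the reply whose context best matches.
    q = _vec(user_turn)
    best = max(corpus_pairs, key=lambda p: _cosine(q, _vec(p[0])))
    return best[1]

pairs = [("where is the clinic", "the clinic is two blocks north"),
         ("who is in charge here", "captain perez runs this post")]
print(retrieve_response("where can i find the clinic", pairs))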