Publications
Search
Pestian, John P.; Sorter, Michael; Connolly, Brian; Cohen, Kevin Bretonnel; McCullumsmith, Cheryl; Gee, Jeffry T.; Morency, Louis-Philippe; Scherer, Stefan; Rohlfs, Lesley
A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial Journal Article
In: Suicide and Life-Threatening Behavior, 2016, ISSN: 03630234.
@article{pestian_machine_2016,
title = {A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial},
author = {John P. Pestian and Michael Sorter and Brian Connolly and Kevin Bretonnel Cohen and Cheryl McCullumsmith and Jeffry T. Gee and Louis-Philippe Morency and Stefan Scherer and Lesley Rohlfs},
url = {http://doi.wiley.com/10.1111/sltb.12312},
doi = {10.1111/sltb.12312},
issn = {0363-0234},
year = {2016},
date = {2016-11-01},
journal = {Suicide and Life-Threatening Behavior},
abstract = {Death by suicide demonstrates profound personal suffering and societal failure. While basic sciences provide the opportunity to understand biological markers related to suicide, computer science provides opportunities to understand suicide thought markers. In this novel prospective, multimodal, multicenter, mixed demographic study, we used machine learning to measure and fuse two classes of suicidal thought markers: verbal and nonverbal. Machine learning algorithms were used with the subjects’ words and vocal characteristics to classify 379 subjects recruited from two academic medical centers and a rural community hospital into one of three groups: suicidal, mentally ill but not suicidal, or controls. By combining linguistic and acoustic characteristics, subjects could be classified into one of the three groups with up to 85% accuracy. The results provide insight into how advanced technology can be used for suicide assessment and prevention.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neubauer, Catherine; Woolley, Joshua; Khooshabeh, Peter; Scherer, Stefan
Getting to know you: a multimodal investigation of team behavior and resilience to stress Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 193–200, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{neubauer_getting_2016,
title = {Getting to know you: a multimodal investigation of team behavior and resilience to stress},
author = {Catherine Neubauer and Joshua Woolley and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993195},
doi = {10.1145/2993148.2993195},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {193--200},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Team cohesion has been suggested to be a critical factor in emotional resilience following periods of stress. Team cohesion may depend on several factors including emotional state, communication among team members and even psychophysiological response. The present study sought to employ several multimodal techniques designed to investigate team behavior as a means of understanding resilience to stress. We recruited 40 subjects to perform a cooperative-task in gender-matched, two-person teams. They were responsible for working together to meet a common goal, which was to successfully disarm a simulated bomb. This high-workload task requires successful cooperation and communication among members. We assessed several behaviors that relate to facial expression, word choice and physiological responses (i.e., heart rate variability) within this scenario. A manipulation of an “ice breaker” condition was used to induce a level of comfort or familiarity within the team prior to the task. We found that individuals in the “ice breaker” condition exhibited better resilience to subjective stress following the task. These individuals also exhibited more insight and cognitive speech, more positive facial expressions and were also able to better regulate their emotional expression during the task, compared to the control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Prendinger, Helmut; Scherer, Stefan
Native vs. Non-native Language Fluency Implications on Multimodal Interaction for Interpersonal Skills Training Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 386–393, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{chollet_native_2016,
title = {Native vs. Non-native Language Fluency Implications on Multimodal Interaction for Interpersonal Skills Training},
author = {Mathieu Chollet and Helmut Prendinger and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993196},
doi = {10.1145/2993148.2993196},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {386--393},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {New technological developments in the field of multimodal interaction show great promise for the improvement and assessment of public speaking skills. However, it is unclear how the experience of non-native speakers interacting with such technologies differs from native speakers. In particular, nonnative speakers could benefit less from training with multimodal systems compared to native speakers. Additionally, machine learning models trained for the automatic assessment of public speaking ability on data of native speakers might not be performing well for assessing the performance of non-native speakers. In this paper, we investigate two aspects related to the performance and evaluation of multimodal interaction technologies designed for the improvement and assessment of public speaking between a population of English native speakers and a population of non-native English speakers. Firstly, we compare the experiences and training outcomes of these two populations interacting with a virtual audience system designed for training public speaking ability, collecting a dataset of public speaking presentations in the process. Secondly, using this dataset, we build regression models for predicting public speaking performance on both populations and evaluate these models, both on the population they were trained on and on how they generalize to the second population.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olszewski, Kyle; Lim, Joseph J.; Saito, Shunsuke; Li, Hao
High-fidelity facial and speech animation for VR HMDs Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 6, pp. 1–14, 2016, ISSN: 07300301.
@article{olszewski_high-fidelity_2016,
title = {High-fidelity facial and speech animation for {VR} {HMDs}},
author = {Kyle Olszewski and Joseph J. Lim and Shunsuke Saito and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=2980179.2980252},
doi = {10.1145/2980179.2980252},
issn = {0730-0301},
year = {2016},
date = {2016-11-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {6},
pages = {1--14},
abstract = {Several significant challenges currently prohibit expressive interaction in virtual reality (VR). The occlusion introduced by modern head-mounted displays (HMDs) makes most existing techniques for facial tracking intractable in this scenario. Furthermore, even state-of-the-art techniques used for real-time facial tracking in less constrained environments fail to capture subtle details of the user’s facial expressions that are essential for compelling speech animation. We introduce a novel system for HMD users to control a digital avatar in real-time while producing plausible speech animation and emotional expressions. Using a monocular camera attached to the front of an HMD, we record video sequences from multiple subjects performing a variety of facial expressions and speaking several phonetically-balanced sentences. These images are used with artist-generated animation data corresponding to these sequences to train a convolutional neural network (CNN) to regress images of a user’s mouth region to the parameters that control a digital avatar. To make training this system more tractable, we make use of audiobased alignment techniques to map images of multiple users making the same utterance to the corresponding animation parameters. We demonstrate that our regression technique is also feasible for tracking the expressions around the user’s eye region, including the eyebrows, with an infrared (IR) camera within the HMD, thereby enabling full facial tracking. This system requires no user-specific calibration, makes use of easily obtainable consumer hardware, and produces high-quality animations of both speech and emotional expressions. Finally, we demonstrate the quality of our system on a variety of subjects and evaluate its performance against state-of-the-art realtime facial tracking techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lucas, Gale; Stratou, Giota; Lieblich, Shari; Gratch, Jonathan
Trust Me: Multimodal Signals of Trustworthiness Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 5–12, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{lucas_trust_2016,
title = {Trust Me: Multimodal Signals of Trustworthiness},
author = {Gale Lucas and Giota Stratou and Shari Lieblich and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993178},
doi = {10.1145/2993148.2993178},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {5--12},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {This paper builds on prior psychological studies that identify signals of trustworthiness between two human negotiators. Unlike prior work, the current work tracks such signals automatically and fuses them into computational models that predict trustworthiness. To achieve this goal, we apply automatic trackers to recordings of human dyads negotiating in a multi-issue bargaining task. We identify behavioral indicators in different modalities (facial expressions, gestures, gaze, and conversational features) that are predictive of trustworthiness. We predict both objective trustworthiness (i.e., are they honest) and perceived trustworthiness (i.e., do they seem honest to their interaction partner). Our experiments show that people are poor judges of objective trustworthiness (i.e., objective and perceived trustworthiness are predicted by different indicators), and that multimodal approaches better predict objective trustworthiness, whereas people overly rely on facial expressions when judging the honesty of their partner. Moreover, domain knowledge (from the literature and prior analysis of behaviors) facilitates the model development process.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Traum, David; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Niki and Julie: A Robot and Virtual Human for Studying Multimodal Social Interaction Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 402–403, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{artstein_niki_2016,
title = {{Niki} and {Julie}: A Robot and Virtual Human for Studying Multimodal Social Interaction},
author = {Ron Artstein and David Traum and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2998532},
doi = {10.1145/2993148.2998532},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {402--403},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {We demonstrate two agents, a robot and a virtual human, which can be used for studying factors that impact social influence. The agents engage in dialogue scenarios that build familiarity, share information, and attempt to influence a human participant. The scenarios are variants of the classical “survival task,” where members of a team rank the importance of a number of items (e.g., items that might help one survive a crash in the desert). These are ranked individually and then re-ranked following a team discussion, and the difference in ranking provides an objective measure of social influence. Survival tasks have been used in psychology, virtual human research, and human-robot interaction. Our agents are operated in a “Wizard-of-Oz” fashion, where a hidden human operator chooses the agents’ dialogue actions while interacting with an experiment participant.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark G.; Georgila, Kallirroi; Nye, Benjamin D.; Auerbach, Daniel; Liu, Zhi Fei; DiNinni, Richard
Learning, Adaptive Support, Student Traits, and Engagement in Scenario-Based Learning Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{core_learning_2016,
  author    = {Mark G. Core and Kallirroi Georgila and Benjamin D. Nye and Daniel Auerbach and Zhi Fei Liu and Richard DiNinni},
  title     = {Learning, Adaptive Support, Student Traits, and Engagement in Scenario-Based Learning},
  booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, FL},
  year      = {2016},
  date      = {2016-11-01},
  url       = {http://www.iitsecdocs.com/search},
  abstract  = {Scenario-based training systems pose an especially difficult challenge for an intelligent tutoring system (ITS). In addition to the basic problems of deciding when to intervene and what guidance to provide, the ITS must decide whether to give guidance directly (e.g., a hint message), indirectly through positive/negative results in the scenario, or to delay guidance until a post-scenario review session. There are a number of factors that an adaptive ITS should consider and we use self-report survey instruments to investigate the relationship between traits, learning strategies, expectations, learner behaviors derived from log files, post-use perceptions of the system, and pre-test and post-test results. We use the ELITE Lite Counseling training system as a testbed for our experiments. This system uses virtual role players to allow learners to practice leadership counseling skills, and is in use at the United States Military Academy (USMA). This paper analyzes two data sets. We collected data from local university students, a non-military population of roughly the same age as USMA Cadets using the system. For these local participants, we could administer surveys and pre-tests and post-tests, and collect log files recording clicks made while using ELITE Lite. The second data set comes from USMA itself but is limited to log files. In both populations, the ITS’s hints are effective at boosting scenario performance, and for the university students, the overall experience promoted learning, and survey results suggest that higher levels of organization in study habits may lead to greater learning with ELITE Lite. For the USMA Cadets, ELITE Lite is part of their Military Leadership course rather than an experiment, which could explain why we found higher scenario performance on average than the non-military population, and more use of the post-scenario review feature.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale; Szablowski, Evan; Gratch, Jonathan; Feng, Andrew; Huang, Tiffany; Boberg, Jill; Shapiro, Ari
The effect of operating a virtual doppleganger in a 3D simulation Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 167–174, ACM Press, Burlingame, CA, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{lucas_effect_2016,
title = {The effect of operating a virtual doppleganger in a {3D} simulation},
author = {Gale Lucas and Evan Szablowski and Jonathan Gratch and Andrew Feng and Tiffany Huang and Jill Boberg and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2994258.2994263},
doi = {10.1145/2994258.2994263},
isbn = {978-1-4503-4592-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 9th International Conference on Motion in Games},
pages = {167--174},
publisher = {ACM Press},
address = {Burlingame, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. Intuition suggests that, with these new capabilities to create avatars that look like their users, every player should have his or her own avatar to play video games or simulations. We explicitly test the impact of having one’s own avatar (vs. a yoked control avatar) in a simulation (i.e., maze running task with mines). We test the impact of avatar identity on both subjective (e.g., feeling connected and engaged, liking avatar’s appearance, feeling upset when avatar’s injured, enjoying the game) and behavioral variables (e.g., time to complete task, speed, number of mines triggered, riskiness of maze path chosen). Results indicate that having an avatar that looks like the user improves their subjective experience, but there is no significant effect on how users perform in the simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ryan, James; Swanson, Reid
Recognizing Coherent Narrative Blog Content Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, pp. 234–246, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-48278-1 978-3-319-48279-8.
@inproceedings{ryan_recognizing_2016,
title = {Recognizing Coherent Narrative Blog Content},
author = {James Ryan and Reid Swanson},
url = {http://link.springer.com/10.1007/978-3-319-48279-8_21},
doi = {10.1007/978-3-319-48279-8_21},
isbn = {978-3-319-48278-1 978-3-319-48279-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
pages = {234--246},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Interactive storytelling applications have at their disposal massive numbers of human-authored stories, in the form of narrative weblog posts, from which story content could be harvested and repurposed. Such repurposing is currently inhibited, however, in that many blog narratives are not sufficiently coherent for use in these applications. In a narrative that is not coherent, the order of the events in the narrative is not clear given the text of the story. We present the results of a study exploring automatic methods for estimating the coherence of narrative blog posts. In the end, our simplest model—one that only considers the degree to which story text is capitalized and punctuated—vastly outperformed a baseline model and, curiously, a series of more sophisticated models. Future work may use this simple model as a baseline, or may use it along with the classifier that it extends to automatically extract large numbers of narrative blog posts from the web for purposes such as interactive storytelling.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Li, Tianye; Li, Hao
Real-Time Facial Segmentation and Performance Capture from RGB Input Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016), pp. 244–261, Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46483-1 978-3-319-46484-8.
@inproceedings{saito_real-time_2016,
title = {Real-Time Facial Segmentation and Performance Capture from {RGB} Input},
author = {Shunsuke Saito and Tianye Li and Hao Li},
url = {https://link.springer.com/chapter/10.1007/978-3-319-46484-8_15},
doi = {10.1007/978-3-319-46484-8_15},
isbn = {978-3-319-46483-1 978-3-319-46484-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016)},
pages = {244--261},
publisher = {Springer International Publishing},
address = {Amsterdam, The Netherlands},
abstract = {We introduce the concept of unconstrained real-time 3D facial performance capture through explicit semantic segmentation in the RGB input. To ensure robustness, cutting edge supervised learning approaches rely on large training datasets of face images captured in the wild. While impressive tracking quality has been demonstrated for faces that are largely visible, any occlusion due to hair, accessories, or hand-to-face gestures would result in significant visual artifacts and loss of tracking accuracy. The modeling of occlusions has been mostly avoided due to its immense space of appearance variability. To address this curse of high dimensionality, we perform tracking in unconstrained images assuming non-face regions can be fully masked out. Along with recent breakthroughs in deep learning, we demonstrate that pixel-level facial segmentation is possible in real-time by repurposing convolutional neural networks designed originally for general semantic segmentation. We develop an efficient architecture based on a two-stream deconvolution network with complementary characteristics, and introduce carefully designed training samples and data augmentation strategies for improved segmentation accuracy and robustness. We adopt a state-of-the-art regression-based facial tracking framework with segmented face images as training, and demonstrate accurate and uninterrupted facial performance capture in the presence of extreme occlusion and even side views. Furthermore, the resulting segmentation can be directly used to composite partial 3D face models on the input images and enable seamless facial manipulation tasks, such as virtual make-up or face replacement.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ruizhe; Wei, Lingyu; Vouga, Etienne; Huang, Qixing; Ceylan, Duygu; Medioni, Gerard; Li, Hao
Capturing Dynamic Textured Surfaces of Moving Targets Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016 Spotlight Presentation), Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46477-0 978-3-319-46478-7.
@inproceedings{wang_capturing_2016,
title = {Capturing Dynamic Textured Surfaces of Moving Targets},
author = {Ruizhe Wang and Lingyu Wei and Etienne Vouga and Qixing Huang and Duygu Ceylan and Gerard Medioni and Hao Li},
url = {https://link.springer.com/chapter/10.1007/978-3-319-46478-7_17},
doi = {10.1007/978-3-319-46478-7_17},
isbn = {978-3-319-46477-0 978-3-319-46478-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016 Spotlight Presentation)},
publisher = {Springer International Publishing},
address = {Amsterdam, The Netherlands},
abstract = {We present an end-to-end system for reconstructing complete watertight and textured models of moving subjects such as clothed humans and animals, using only three or four handheld sensors. The heart of our framework is a new pairwise registration algorithm that minimizes, using a particle swarm strategy, an alignment error metric based on mutual visibility and occlusion. We show that this algorithm reliably registers partial scans with as little as 15% overlap without requiring any initial correspondences, and outperforms alternative global registration algorithms. This registration algorithm allows us to reconstruct moving subjects from free-viewpoint video produced by consumer-grade sensors, without extensive sensor calibration, constrained capture volume, expensive arrays of cameras, or templates of the subject geometry.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Krum, David M.; Bolas, Mark T.
Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments Journal Article
In: ACM Transactions on Applied Perception, vol. 14, no. 2, pp. 1–17, 2016, ISSN: 15443558.
@article{jones_vertical_2016,
title = {Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments},
author = {J. Adam Jones and David M. Krum and Mark T. Bolas},
url = {http://dl.acm.org/citation.cfm?id=2983631},
doi = {10.1145/2983631},
issn = {1544-3558},
year = {2016},
date = {2016-10-01},
journal = {ACM Transactions on Applied Perception},
volume = {14},
number = {2},
pages = {1--17},
abstract = {In this article, we detail a series of experiments that examines the effect of vertical field-of-view extension and the addition of non-specific peripheral visual stimulation on gait characteristics and distance judgments in a head-worn virtual environment. Specifically, we examined four field-of-view configurations: a common 60° diagonal field of view (48° × 40°), a 60° diagonal field of view with the addition of a luminous white frame in the far periphery, a field of view with an extended upper edge, and a field of view with an extended lower edge. We found that extension of the field of view, either with spatially congruent or spatially non-informative visuals, resulted in improved distance judgments and changes in observed posture. However, these effects were not equal across all field-of-view configurations, suggesting that some configurations may be more appropriate than others when balancing performance, cost, and ergonomics.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Smart Mobile Virtual Characters: Video Characters vs. Animated Characters Proceedings Article
In: Proceedings of the Fourth International Conference on Human Agent Interaction, pp. 371–374, ACM Press, Biopolis, Singapore, 2016, ISBN: 978-1-4503-4508-8.
@inproceedings{kang_smart_2016,
title = {Smart Mobile Virtual Characters: Video Characters vs. Animated Characters},
author = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2980511},
doi = {10.1145/2974804.2980511},
isbn = {978-1-4503-4508-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the Fourth International Conference on Human Agent Interaction},
pages = {371--374},
publisher = {ACM Press},
address = {Biopolis, Singapore},
abstract = {This study investigates presentation techniques for a chat-based virtual human that communicates engagingly with users via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated 3D virtual character as opposed to a real human video character capable of displaying backchannel behaviors. The findings of our study demonstrate that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Study comparing video-based characters and 3D-based characters on mobile devices for chat Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 181–186, ACM Press, Burlingame, California, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{kang_study_2016,
title = {Study comparing video-based characters and {3D}-based characters on mobile devices for chat},
author = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2994274},
doi = {10.1145/2994258.2994274},
isbn = {978-1-4503-4592-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 9th International Conference on Motion in Games},
pages = {181--186},
publisher = {ACM Press},
address = {Burlingame, California},
abstract = {This study explores presentation techniques for a chat-based virtual human that communicates engagingly with users. Interactions with the virtual human occur via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated virtual character as opposed to a real human video character capable of displaying realistic backchannel behaviors. An audio-only interface is compared additionally with the two types of characters. The findings of our study suggest that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits realistic backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Joshi, Himanshu; Rosenbloom, Paul S.; Ustun, Volkan
Continuous phone recognition in the Sigma cognitive architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 18, pp. 23–32, 2016, ISSN: 2212683X.
@article{joshi_continuous_2016,
title = {Continuous phone recognition in the {Sigma} cognitive architecture},
author = {Himanshu Joshi and Paul S. Rosenbloom and Volkan Ustun},
url = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X16300652},
doi = {10.1016/j.bica.2016.09.001},
issn = {2212-683X},
year = {2016},
date = {2016-10-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {18},
pages = {23--32},
abstract = {Spoken language processing is an important capability of human intelligence that has hitherto been unexplored by cognitive architectures. This reflects on both the symbolic and sub-symbolic nature of the speech problem, and the capabilities provided by cognitive architectures to model the latter and its rich interplay with the former. Sigma has been designed to leverage the state-of-the-art hybrid (discrete + continuous) mixed (symbolic + probabilistic) capability of graphical models to provide in a uniform non-modular fashion effective forms of, and integration across, both cognitive and sub-cognitive behavior. In this article, previous work on speaker dependent isolated word recognition has been extended to demonstrate Sigma’s feasibility to process a stream of fluent audio and recognize phones, in an online and incremental manner with speaker independence. Phone recognition is an important step in integrating spoken language processing into Sigma. This work also extends the acoustic front-end used in the previous work in service of speaker independence. All of the knowledge used in phone recognition was added supraarchitecturally – i.e. on top of the architecture – without requiring the addition of new mechanisms to the architecture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bernardet, Ulysses; Chollet, Mathieu; DiPaola, Steve; Scherer, Stefan
An Architecture for Biologically Grounded Real-Time Reflexive Behavior Book Section
In: Intelligent Virtual Agents, vol. 10011, pp. 295–305, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-47664-3 978-3-319-47665-0.
@incollection{bernardet_architecture_2016,
  title     = {An Architecture for Biologically Grounded Real-Time Reflexive Behavior},
  author    = {Ulysses Bernardet and Mathieu Chollet and Steve DiPaola and Stefan Scherer},
  url       = {https://link.springer.com/chapter/10.1007/978-3-319-47665-0_26},
  doi       = {10.1007/978-3-319-47665-0_26},
  isbn      = {978-3-319-47664-3 978-3-319-47665-0},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Intelligent Virtual Agents},
  volume    = {10011},
  pages     = {295--305},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {In this paper, we present a reflexive behavior architecture, that is geared towards the application in the control of the non-verbal behavior of the virtual humans in a public speaking training system. The model is organized along the distinction between behavior triggers that are internal (endogenous) to the agent, and those that origin in the environment (exogenous). The endogenous subsystem controls gaze behavior, triggers self-adaptors, and shifts between different postures, while the exogenous system controls the reaction towards auditory stimuli with different temporal and valence characteristics. We evaluate the different components empirically by letting participants compare the output of the proposed system to valid alternative variations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Valstar, Michel; Gratch, Jonathan; Schuller, Björn; Ringeval, Fabien; Lalanne, Denis; Torres, Mercedes Torres; Scherer, Stefan; Stratou, Giota; Cowie, Roddy; Pantic, Maja
AVEC 2016: Depression, Mood, and Emotion Recognition Workshop and Challenge Proceedings Article
In: Proceedings of the 6th International Workshop on Audio/Visual Emotion Challenge, pp. 3–10, ACM Press, Amsterdam, The Netherlands, 2016, ISBN: 978-1-4503-4516-3.
@inproceedings{valstar_avec_2016,
  title     = {{AVEC} 2016: Depression, Mood, and Emotion Recognition Workshop and Challenge},
  author    = {Michel Valstar and Jonathan Gratch and Björn Schuller and Fabien Ringeval and Denis Lalanne and Mercedes Torres Torres and Stefan Scherer and Giota Stratou and Roddy Cowie and Maja Pantic},
  url       = {http://dl.acm.org/citation.cfm?id=2988258},
  doi       = {10.1145/2988257.2988258},
  isbn      = {978-1-4503-4516-3},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the 6th International Workshop on Audio/Visual Emotion Challenge},
  pages     = {3--10},
  publisher = {ACM Press},
  address   = {Amsterdam, The Netherlands},
  abstract  = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2016) "Depression, Mood and Emotion" will be the sixth competition event aimed at comparison of multimedia processing and machine learning methods for automatic audio, visual and physiological depression and emotion analysis, with all participants competing under strictly the same conditions. The goal of the Challenge is to provide a common benchmark test set for multi-modal information processing and to bring together the depression and emotion recognition communities, as well as the audio, video and physiological processing communities, to compare the relative merits of the various approaches to depression and emotion recognition under well-defined and strictly comparable conditions and establish to what extent fusion of the approaches is possible and beneficial. This paper presents the challenge guidelines, the common data used, and the performance of the baseline system on the two tasks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Carla; Tin, Jessica; Brown, Jeremy; Fritzsch, Elisabeth; Gabber, Shirley
Wochat Chatbot User Experience Summary Proceedings Article
In: Proceedings of the 2016 IVA: WOCHAT Workshop, Zerotype, Los Angeles, CA, 2016.
@inproceedings{gordon_wochat_2016,
  author    = {Carla Gordon and Jessica Tin and Jeremy Brown and Elisabeth Fritzsch and Shirley Gabber},
  title     = {Wochat Chatbot User Experience Summary},
  booktitle = {Proceedings of the 2016 IVA: WOCHAT Workshop},
  publisher = {Zerotype},
  address   = {Los Angeles, CA},
  year      = {2016},
  date      = {2016-09-01},
  url       = {http://workshop.colips.org/wochat/documents/ST-281.pdf},
  abstract  = {A team of 5 interns at the USC Institute for Creative Technologies interacted with 5 of the 6 chatbots; IRIS, Sammy, Sarah, TickTock and Joker. Unfortunately no one in our team could get the 6th chatbot, pyEliza, working. We found that there were certainly some chatbots that were better than others, and some of us were surprised by how distinct each bot felt from the others. One member commented on how they felt as though each different chatbot had an individual “voice” so to speak. Others were surprised by just how much of a “personality” the bots seemed to have. Most members of our team cited IRIS as their favorite, in terms of being capable of producing naturalistic conversation, with Sammy taking a close second. However, only one member of the team was able to interact with Sarah and TickTock, but that member cited TickTock as a capable conversation partner, and Sarah as being the best bot on a number of measures including appropriateness of responses and overall conversation cohesiveness. Therefore, perhaps if more members had been able to interact with Sarah and TickTock they may have ranked higher. Lastly, Joker was by far our least favorite, with whom no member of our team was able to have anything resembling a naturalistic or even cohesive conversation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Dennison, Mark; Neubauer, Cathy; Passaro, Tony; Harrison, Andre; Scherer, Stefan; Khooshabeh, Pete
Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop, Los Angeles, CA, 2016.
@inproceedings{dennison_using_2016,
  title     = {Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task},
  author    = {Mark Dennison and Cathy Neubauer and Tony Passaro and Andre Harrison and Stefan Scherer and Pete Khooshabeh},
  url       = {http://marksdennison.com/s/DennisonPAVA2016.pdf},
  year      = {2016},
  date      = {2016-09-01},
  booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop},
  address   = {Los Angeles, CA},
  abstract  = {Teams of two individuals worked together in a high-intensity simulated bomb diffusing task. Half the teams were given icebreaker social time to increase comfort and familiarity with each other and the remaining half of the teams served as controls and did not meet until the task began. Electrocardiography and impedance cardiography were recorded to examine cardiac changes during task cooperation. Changes in ventricular contractility showed that individuals who had taken part in the icebreaker showed increased task engagement over time whereas controls showed the opposite. Data also trended to show that icebreaker participants were in a challenge state and controls were in a threat state during the final thirty seconds of bomb defusal. Finally, we show that a set of cardiac features can be used to classify participant data as belonging to the icebreaker or control groups with an accuracy as high as 88%.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ahn, Emily; Morbini, Fabrizio; Gordon, Andrew S.
Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing Proceedings Article
In: Proceedings of the 9th International Natural Language Generation Conference (INLG-2016), Edinburgh, UK, 2016.
@inproceedings{ahn_improving_2016,
  author    = {Emily Ahn and Fabrizio Morbini and Andrew S. Gordon},
  title     = {Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing},
  booktitle = {Proceedings of the 9th International Natural Language Generation Conference (INLG-2016)},
  address   = {Edinburgh, UK},
  year      = {2016},
  date      = {2016-09-01},
  url       = {https://www.researchgate.net/publication/307512031_Improving_Fluency_in_Narrative_Text_Generation_With_Grammatical_Transformations_and_Probabilistic_Parsing},
  abstract  = {In research on automatic generation of narrative text, story events are often formally represented as a causal graph. When serializing and realizing this causal graph as natural language text, simple approaches produce cumbersome sentences with repetitive syntactic structure, e.g. long chains of “because” clauses. In our research, we show that the fluency of narrative text generated from causal graphs can be improved by applying rule-based grammatical transformations to generate many sentence variations with equivalent semantics, then selecting the variation that has the highest probability using a probabilistic syntactic parser. We evaluate our approach by generating narrative text from causal graphs that encode 100 brief stories involving the same three characters, based on a classic film of experimental social psychology. Crowdsourced workers judged the writing quality of texts generated with ranked transformations as significantly higher than those without, and not significantly lower than human-authored narratives of the same situations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2006
Lee, Jina; Marsella, Stacy C.
Nonverbal Behavior Generator for Embodied Conversational Agents Proceedings Article
In: 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation
@inproceedings{lee_nonverbal_2006,
  author    = {Jina Lee and Stacy C. Marsella},
  title     = {Nonverbal Behavior Generator for Embodied Conversational Agents},
  booktitle = {6th International Conference on Intelligent Virtual Agents},
  address   = {Marina del Rey, CA},
  year      = {2006},
  date      = {2006-08-01},
  url       = {http://ict.usc.edu/pubs/Nonverbal%20Behavior%20Generator%20for%20Embodied%20Conversational%20Agents.pdf},
  abstract  = {Believable nonverbal behaviors for embodied conversational agents (ECA) can create a more immersive experience for users and improve the effectiveness of communication. This paper describes a nonverbal behavior generator that analyzes the syntactic and semantic structure of the surface text as well as the affective state of the ECA and annotates the surface text with appropriate nonverbal behaviors. A number of video clips of people conversing were analyzed to extract the nonverbal behavior generation rules. The system works in real-time and is user-extensible so that users can easily modify or extend the current behavior generation rules.},
  keywords  = {Social Simulation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gluck, Kevin A.; Gunzelmann, Glenn; Gratch, Jonathan; Hudlicka, Eva; Ritter, Frank E.
Modeling the Impact of Cognitive Moderators on Human Cognition and Performance Proceedings Article
In: Proceedings of the 2006 Conference of the Cognitive Society, pp. 2658, Vancouver, Canada, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gluck_modeling_2006,
  title     = {Modeling the Impact of Cognitive Moderators on Human Cognition and Performance},
  author    = {Kevin A. Gluck and Glenn Gunzelmann and Jonathan Gratch and Eva Hudlicka and Frank E. Ritter},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Impact%20of%20Cognitive%20Moderators%20on%20Human%20Cognition%20and%20Performance.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Proceedings of the 2006 Conference of the Cognitive Society},
  pages     = {2658},
  address   = {Vancouver, Canada},
  abstract  = {Cognitive moderators, such as emotions, personality, stress, and fatigue, represent an emerging area of research within the cognitive science community and are increasingly acknowledged as important and ubiquitous influences on cognitive processes. This symposium brings together scientists engaged in research to develop models that help us better understand the mechanisms through which these factors impact human cognition and performance. There are two unifying themes across the presentations. One theme is a commitment to developing computational models useful for simulating the processes that produce the effects and phenomena of interest. The second theme is a commitment to assessing the validity of the models by comparing their performance against empirical human data.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Lamothe, Francois; Marsella, Stacy C.; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Virtual Rapport Proceedings Article
In: Lecture Notes in Computer Science, pp. 14–27, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_virtual_2006-1,
  title     = {Virtual Rapport},
  author    = {Jonathan Gratch and Anna Okhmatovskaia and Francois Lamothe and Stacy C. Marsella and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Virtual%20Rapport.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {4311},
  pages     = {14--27},
  address   = {Marina del Rey, CA},
  abstract  = {Effective face-to-face conversations are highly interactive. Participants respond to each other, engaging in nonconscious behavioral mimicry and backchanneling feedback. Such behaviors produce a subjective sense of rapport and are correlated with effective communication, greater liking and trust, and greater influence between participants. Creating rapport requires a tight sense-act loop that has been traditionally lacking in embodied conversational agents. Here we describe a system, based on psycholinguistic theory, designed to create a sense of rapport between a human speaker and virtual human listener. We provide empirical evidence that it increases speaker fluency and engagement.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kopp, Stefan; Krenn, Brigitte; Marsella, Stacy C.; Marshall, Andrew; Pelachaud, Catherine; Pirker, Hannes; Thórisson, Kristinn R.; Vilhjálmsson, Hannes
Towards a Common Framework for Multimodal Generation: The Behavior Markup Language Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation
@inproceedings{kopp_towards_2006,
  author    = {Stefan Kopp and Brigitte Krenn and Stacy C. Marsella and Andrew Marshall and Catherine Pelachaud and Hannes Pirker and Kristinn R. Thórisson and Hannes Vilhjálmsson},
  title     = {Towards a Common Framework for Multimodal Generation: The Behavior Markup Language},
  booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
  address   = {Marina del Rey, CA},
  year      = {2006},
  date      = {2006-08-01},
  url       = {http://ict.usc.edu/pubs/Towards%20a%20Common%20Framework%20for%20Multimodal%20Generation-%20The%20Behavior%20Markup%20Language.pdf},
  abstract  = {This paper describes an international effort to unify a multimodal behavior generation framework for Embodied Conversational Agents (ECAs). We propose a three stage model we call SAIBA where the stages represent intent planning, behavior planning and behavior realization. A Function Markup Language (FML), describing intent without referring to physical behavior, mediates between the first two stages and a Behavior Markup Language (BML)describing desired physical realization, mediates between the last two stages. In this paper we will focus on BML. The hope is that this abstraction and modularization will help ECA researchers pool their resources to build more sophisticated virtual humans.},
  keywords  = {Social Simulation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Ronakkumar; Leuski, Anton; Traum, David
Dealing with Out of Domain Questions in Virtual Characters Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_dealing_2006,
  title     = {Dealing with Out of Domain Questions in Virtual Characters},
  author    = {Ronakkumar Patel and Anton Leuski and David Traum},
  url       = {http://ict.usc.edu/pubs/Dealing%20with%20Out%20of%20Domain%20Questions%20in%20Virtual%20Characters.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents},
  address   = {Marina del Rey, CA},
  abstract  = {We consider the problem of designing virtual characters that support speech-based interactions in a limited domain. Previously we have shown that classification can be an effective and robust tool for selecting appropriate in-domain responses. In this paper, we consider the problem of dealing with out-of-domain user questions. We introduce a taxonomy of out-of-domain response types. We consider three classification architectures for selecting the most appropriate out-of-domain responses. We evaluate these architectures and show that they significantly improve the quality of the response selection making the user's interaction with the virtual character more natural and engaging.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
An Information State-Based Dialogue Manager for Call for Fire Dialogues Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_information_2006,
  author    = {Antonio Roque and David Traum},
  title     = {An Information State-Based Dialogue Manager for Call for Fire Dialogues},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-07-01},
  url       = {http://ict.usc.edu/pubs/An%20Information%20State-Based%20Dialogue%20Manager%20for%20Call%20for%20Fire%20Dialogues.pdf},
  abstract  = {We present a dialogue manager for "Call for Fire" training dialogues. We describe the training environment, the domain, the features of its novel information state-based dialogue manager, the system it is a part of, and preliminary evaluation results.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swanson, Reid; Gordon, Andrew S.
A Comparison of Alternative Parse Tree Paths for Labeling Semantic Roles Proceedings Article
In: Proceedings of the Joint Conference of the International Committee on Computational Linguistics and the Association for Computational Linguistics (COLING/ACL), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{swanson_comparison_2006,
  title     = {A Comparison of Alternative Parse Tree Paths for Labeling Semantic Roles},
  author    = {Reid Swanson and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/A%20Comparison%20of%20Alternative%20Parse%20Tree%20Paths%20for%20Labeling%20Semantic%20Roles.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {Proceedings of the Joint Conference of the International Committee on Computational Linguistics and the Association for Computational Linguistics (COLING/ACL)},
  address   = {Sydney, Australia},
  abstract  = {The integration of sophisticated inference-based techniques into natural language processing applications first requires a reliable method of encoding the predicate-argument structure of the propositional context of text. Recent statistical approaches to automated predicate-argument annotation have utilized parse tree paths as predictive features, which encode the path between a verb predicate and a node in the parse tree that governs its argument. In this paper, we explore a number of alternatives for how these parse tree paths are encoded, focusing on the difference between automatically generated constituency parses and dependency parses. After describing five alternatives for encoding parse tree paths, we investigate how well each can be aligned with the argument substrings in annotated text corpora, their relative precision and recall performance, and their comparative learning curves. Results indicate that constituency parsers produce parse tree paths that can more easily be aligned to argument substrings, perform better in precision and recall, and have more favorable learning curves than those produced by a dependency parser.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Patel, Ronakkumar; Traum, David; Kennedy, Brandon
Building Effective Question Answering Characters Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_building_2006,
  author    = {Anton Leuski and Ronakkumar Patel and David Traum and Brandon Kennedy},
  title     = {Building Effective Question Answering Characters},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  year      = {2006},
  date      = {2006-07-01},
  url       = {http://ict.usc.edu/pubs/Building%20Effective%20Question%20Answering%20Characters.pdf},
  abstract  = {In this paper, we describe methods for building and evaluation of limited domain question-answering characters. Several classification techniques are tested, including text classification using support vector machines, language-model based retrieval, and cross-language information retrieval techniques, with the latter having the highest success rate. We also evaluated the effect of speech recognition errors on performance with users, finding that retrieval is robust until recognition reaches over 50% WER.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana
Cognitive and Emotive Empathy in Discourse: Towards an Integrated Theory of Mind Proceedings Article
In: Proceedings of the 28th Annual Conference of the Cognitive Society, Vancouver, Canada, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{martinovski_cognitive_2006,
  title     = {Cognitive and Emotive Empathy in Discourse: Towards an Integrated Theory of Mind},
  author    = {Bilyana Martinovski},
  url       = {http://ict.usc.edu/pubs/Cognitive%20and%20Emotive%20Empathy%20in%20Discourse-%20Towards%20an%20Integrated%20Theory%20of%20Mind.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {Proceedings of the 28th Annual Conference of the Cognitive Society},
  address   = {Vancouver, Canada},
  abstract  = {This paper presents an empirical qualitative analysis of eliciting, giving and receiving empathy in discourse. The study identifies discursive and linguistic features, which realize cognitive, emotive, parallel and reactive empathy and suggests that imitation, simulation and representation could be non-exclusive processes in Theory of Mind reasoning.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Core, Mark; Lane, H. Chad; Lent, Michael; Gomboc, Dave; Solomon, Steve; Rosenberg, Milton
Building Explainable Artificial Intelligence Systems Proceedings Article
In: Proceedings of the 18th Innovative Applications of Artificial Intelligence Conference, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{core_building_2006,
  title     = {Building Explainable Artificial Intelligence Systems},
  author    = {Mark Core and H. Chad Lane and Michael Lent and Dave Gomboc and Steve Solomon and Milton Rosenberg},
  url       = {http://ict.usc.edu/pubs/Building%20Explainable%20Artificial%20Intelligence%20Systems.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {Proceedings of the 18th Innovative Applications of Artificial Intelligence Conference},
  address   = {Boston, MA},
  abstract  = {As artificial intelligence (AI) systems and behavior models in military simulations become increasingly complex, it has been difficult for users to understand the activities of computer-controlled entities. Prototype explanation systems have been added to simulators, but designers have not heeded the lessons learned from work in explaining expert system behavior. These new explanation systems are not modular and not portable; they are tied to a particular AI system. In this paper, we present a modular and generic architecture for explaining the behavior of simulated entities. We describe its application to the Virtual Humans, a simulation designed to teach soft skills such as negotiation and cultural awareness.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Riedl, Mark O.; Young, R. Michael
From Linear Story Generation to Branching Story Graphs Journal Article
In: IEEE Computer Graphics and Applications, vol. 26, no. 3, pp. 23–31, 2006.
Abstract | Links | BibTeX | Tags:
@article{riedl_linear_2006,
  title     = {From Linear Story Generation to Branching Story Graphs},
  author    = {Mark O. Riedl and R. Michael Young},
  url       = {http://ict.usc.edu/pubs/From%20Linear%20Story%20Generation%20to%20Branching%20Story%20Graphs.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {IEEE Computer Graphics and Applications},
  volume    = {26},
  number    = {3},
  pages     = {23--31},
  abstract  = {Interactive narrative systems are storytelling systems in which the user can influence the content or ordering of story world events. Conceptually, an interactive narrative can be represented as a branching graph of narrative elements, implying points at which an interactive user's decisions influence the content or ordering of the remaining elements. Generative approaches to interactive narrative construct narrative at runtime or pre-construct on a per-session basis highly interactive branching narrative structures. One generative approach – narrative mediation – represents story as a linear progression of events with anticipated user actions and system-controlled agent actions together in a partially-ordered plan. For every possible way the user can violate the story plan, an alternative story plan is generated. If narrative mediation is powerful enough to express the same interactive stories as systems that use branching narrative structures, then linear narrative generation techniques can be applied to interactive narrative generation. This paper lays out this argument and sketches a proof that narrative mediation is at least as powerful as acyclic branching story structures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
McAlinden, Ryan; Lent, Michael; Clevenger, William; Tien, Wen C.
Using Environmental Annotations & Affordances to Model Culture Proceedings Article
In: Artificial Intelligence and Interactive Digital Entertainment Conference Demonstrations, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{mcalinden_using_2006,
  title     = {Using Environmental Annotations \& Affordances to Model Culture},
  author    = {Ryan McAlinden and Michael Lent and William Clevenger and Wen C. Tien},
  url       = {http://ict.usc.edu/pubs/Using%20Environmental%20Annotations%20&%20Affordances%20to%20Model%20Culture.pdf},
  year      = {2006},
  date      = {2006-06-01},
  booktitle = {Artificial Intelligence and Interactive Digital Entertainment Conference Demonstrations},
  address   = {Marina del Rey, CA},
  abstract  = {This paper details the demonstration of an annotation and affordance-based software model intended to introduce cultural and social influences into a non-player character's (NPC) decision-making process. We describe how recent research has supported the need to begin incorporating the effects of culture into the interactive digital domain. The technical approach is presented that describes the software techniques for embedding and utilizing culturally-specific information inside of a virtual environment, as well as the design and implementation of a deterministic Markov Decision Process (MDP) to model the affects of culture on the AI.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Callieri, Marco; Debevec, Paul; Scopigno, Roberto
A realtime immersive application with realistic lighting: The Parthenon Journal Article
In: Computers & Graphics, vol. 30, no. 3, pp. 368–376, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@article{callieri_realtime_2006,
  title     = {A realtime immersive application with realistic lighting: The Parthenon},
  author    = {Marco Callieri and Paul Debevec and Roberto Scopigno},
  url       = {http://ict.usc.edu/pubs/A%20realtime%20immersive%20application%20with%20realistic%20lighting-%20The%20Parthenon.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {Computers \& Graphics},
  volume    = {30},
  number    = {3},
  pages     = {368--376},
  abstract  = {Offline rendering techniques have nowadays reached an astonishing level of realism but pay the cost of long computational times. The new generation of programmable graphic hardware, on the other hand, gives the possibility to implement in realtime some of the visual effects previously available only for cinematographic production. We describe the design and implementation of an interactive system which is able to reproduce in realtime one of the crucial sequences from the short movie “The Parthenon” presented at Siggraph 2004. The application is designed to run on a specific immersive reality system, making possible for a user to perceive the virtual environment with nearly cinematographic visual quality.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {article}
}
Parsons, Thomas D.; Rogers, Steven A.; Braaten, Alyssa J.; Woods, Steven Paul; Tröster, Alexander I.
Cognitive sequelae of subthalamic nucleus deep brain stimulation in Parkinson's disease: a meta-analysis Journal Article
In: Lancet Neurology, vol. 5, pp. 578–588, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_cognitive_2006,
  title     = {Cognitive sequelae of subthalamic nucleus deep brain stimulation in Parkinson's disease: a meta-analysis},
  author    = {Thomas D. Parsons and Steven A. Rogers and Alyssa J. Braaten and Steven Paul Woods and Alexander I. Tröster},
  url       = {http://ict.usc.edu/pubs/Cognitive%20sequelae%20of%20subthalamic%20nucleus%20deep%20brain%20stimulation%20in%20Parkinson%E2%80%99s%20disease-%20a%20meta-analysis.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {Lancet Neurology},
  volume    = {5},
  pages     = {578--588},
  abstract  = {Summary: Background Deep brain stimulation of the subthalamic nucleus (STN DBS) is an increasingly common treatment for Parkinson's disease. Qualitative reviews have concluded that diminished verbal fluency is common after STN DBS, but that changes in global cognitive abilities, attention, executive functions, and memory are only inconsistently observed and, when present, often nominal or transient. We did a quantitative meta-analysis to improve understanding of the variability and clinical significance of cognitive dysfunction after STN DBS. Methods: We searched MedLine, PsycLIT, and ISI Web of Science electronic databases for articles published between 1990 and 2006, and extracted information about number of patients, exclusion criteria, confirmation of target by microelectrode recording, verification of electrode placement via radiographic means, stimulation parameters, assessment time points, assessment measures, whether patients were on levodopa or dopaminomimetics, and summary statistics needed for computation of effect sizes. We used the random-effects meta-analytical model to assess continuous outcomes before and after STN DBS. Findings: Of 40 neuropsychological studies identified, 28 cohort studies (including 612 patients) were eligible for inclusion in the meta-analysis. After adjusting for heterogeneity of variance in study effect sizes, the random effects meta-analysis revealed significant, albeit small, declines in executive functions and verbal learning and memory. Moderate declines were only reported in semantic (Cohen's d 0·73) and phonemic verbal fluency (0·51). Changes in verbal fluency were not related to patient age, disease duration, stimulation parameters, or change in dopaminomimetic dose after surgery. Interpretation: STN DBS, in selected patients, seems relatively safe from a cognitive standpoint. However, difficulty in identification of factors underlying changes in verbal fluency draws attention to the need for uniform and detailed reporting of patient selection, demographic, disease, treatment, surgical, stimulation, and clinical outcome parameters.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Dini, Don M.; Lent, Michael; Carpenter, Paul; Iyer, Kumar
Building Robust Planning and Execution Systems for Virtual Worlds Proceedings Article
In: Proceedings of Artificial Intelligence and Interactive Digital Entertainment, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{dini_building_2006,
title = {Building Robust Planning and Execution Systems for Virtual Worlds},
author = {Don M. Dini and Michael Lent and Paul Carpenter and Kumar Iyer},
url = {http://ict.usc.edu/pubs/Building%20Robust%20Planning%20and%20Execution%20Systems%20for%20Virtual%20Worlds.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Proceedings of Artificial Intelligence and Interactive Digital Entertainment},
address = {Marina del Rey, CA},
abstract = {Planning and execution systems have been used in a wide variety of systems to create practical and successful automation. They have been used for everything from performing scientific research on the surface of Mars to controlling enemy characters in video games to performing military air campaign planning. After reviewing past work on these various planning and execution systems, we believe that most lack one or more key components contained in another system. To enable future researchers to build more complete systems, and avoid possible serious system failure, we identify the major technical problems any implementer of such a system would have to face. In addition we cite recent solutions to each of these technical problems. We limit our focus to planning and execution for virtual worlds and the unique problems faced therein.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Riedl, Mark O.; Stern, Andrew; Dini, Don M.
Mixing Story and Simulation in Interactive Narrative Proceedings Article
In: 2nd Conference on Artificial Intelligence and Interactive Entertainment (AIIDE), Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{riedl_mixing_2006,
title = {Mixing Story and Simulation in Interactive Narrative},
author = {Mark O. Riedl and Andrew Stern and Don M. Dini},
url = {http://ict.usc.edu/pubs/Mixing%20Story%20and%20Simulation%20in%20Interactive%20Narrative.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {2nd Conference on Artificial Intelligence and Interactive Entertainment (AIIDE)},
address = {Marina del Rey, CA},
abstract = {Simulation is a common feature in computer entertainment. However, in computer games simulation and story are often kept distinct by interleaving interactive play and cut scenes. We describe a technique for an interactive narrative system that more closely integrates simulation and storyline. The technique uses a combination of semi-autonomous character agents and high-level story direction. The storyline is decomposed into directives to character agents to achieve particular world states. Otherwise, character agents are allowed to behave autonomously. When the player's actions create inconsistency between the simulation state and storyline, the storyline is dynamically adapted and repaired to resolve any inconsistencies.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering (2006), 2006.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{einarsson_relighting_2006,
title = {Relighting Human Locomotion with Flowed Reflectance Fields},
author = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
url = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Eurographics Symposium on Rendering (2006)},
abstract = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters Proceedings Article
In: 11th International Fall Workshop on Vision, Modeling and Visualization, Aachen, Germany, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{tariq_efficient_2006-1,
title = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters},
author = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
url = {http://ict.usc.edu/pubs/Efficient%20Estimation%20of%20Spatially%20Varying%20Subsurface%20Scattering%20Parameters.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {11th International Fall Workshop on Vision, Modeling and Visualization},
address = {Aachen, Germany},
abstract = {We present an image-based technique to efficiently acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase-shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set of structured light patterns is used to obtain face geometry. We subtract the minimum of each profile to remove the contribution of interreflected light from the rest of the face, and then match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the subsurface reflectance of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Egges, Arjan; Eliëns, Anton; Isbister, Katherine; Paiva, Ana; Rist, Thomas; Hagen, Paul
Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans Proceedings Article
In: Dagstuhl Seminar Proceedings, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_design_2006,
  author    = {Jonathan Gratch and Stacy C. Marsella and Arjan Egges and Anton Eliëns and Katherine Isbister and Ana Paiva and Thomas Rist and Paul Hagen},
  title     = {Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans},
  booktitle = {Dagstuhl Seminar Proceedings},
  year      = {2006},
  date      = {2006-06-01},
  url       = {http://ict.usc.edu/pubs/Design%20criteria%20techniques%20and%20case%20studies%20for%20creating%20and%20evaluating%20interactive%20experiences%20for%20virtual%20humans.pdf},
  abstract  = {How does one go about designing a human? With the rise in recent years of virtual humans this is no longer purely a philosophical question. Virtual humans are intelligent agents with a body, often a human-like graphical body, that interact verbally and non-verbally with human users on a variety of tasks and applications. At a recent meeting on this subject, the above authors participated in a several day discussion on the question of virtual human design. Our working group approached this question from the perspective of interactivity. Specifically, how can one design effective interactive experiences involving a virtual human, and what constraints does this goal place on the form and function of an embodied conversational agent. Our group grappled with several related questions: What ideals should designers aspire to, what sources of theory and data will best lead to this goal and what methodologies can inform and validate the design process? This article summarizes our output and suggests a specific framework, borrowed from interactive media design, as a vehicle for advancing the state of interactive experiences with virtual humans.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Tucker, Karen A.; Hall, Colin D.; Robertson, Wendy T.; Eron, Joseph J.; Fried, Michael W.; Robertson, R. Kevin
Neurocognitive functioning and HAART in HIV and hepatitis C virus co-infection Journal Article
In: AIDS, vol. 20, pp. 1591–1595, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_neurocognitive_2006,
title = {Neurocognitive functioning and HAART in HIV and hepatitis C virus co-infection},
author = {Thomas D. Parsons and Karen A. Tucker and Colin D. Hall and Wendy T. Robertson and Joseph J. Eron and Michael W. Fried and R. Kevin Robertson},
url = {http://ict.usc.edu/pubs/Neurocognitive%20functioning%20and%20HAART%20in%20HIV%20and%20hepatitis%20C%20virus%20co-infection.pdf},
year = {2006},
date = {2006-05-01},
journal = {AIDS},
volume = {20},
pages = {1591–1595},
abstract = {Objectives: This study examined the effects of HAART on neurocognitive functioning in persons with hepatitis C virus (HCV) and HIV co-infection. Design: A prospective study examining neurocognitive performance before and after HAART initiation. Method: Participant groups included a mono-infected group (45 HIV/HCV-participants) and a co-infected group (20 HIV/HCV participants). A neuropsychological battery (attention/concentration, psychomotor speed, executive functioning, verbal memory, visual memory, fine motor, and gross motor functioning) was used to evaluate all participants. After 6 months of HAART, 31 HIV mono-infected and 13 HCV/ HIV co-infected participants were reevaluated. Results: Neurocognitive functioning by domain revealed significantly worse performance in the co-infected group when compared to the monoinfected group on domains of visual memory and fine motor functioning. Assessment of neurocognitive functioning after antiretroviral therapy revealed that the co-infected group was no longer performing worse than the monoinfected group. Conclusions: The findings of the current study suggest that persons with HCV/HIV co-infection may have greater neurocognitive declines than persons with HIV infection alone. HCV/HIV co-infection may accelerate the progression of HIV related neurocognitive decline.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Riedl, Mark O.; Stern, Andrew
Believable Agents and Intelligent Scenario Direction for Social and Cultural Leadership Training Proceedings Article
In: 15th Conference on Behavior Representation in Modeling and Simulation (BRIMS), Baltimore, MD, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{riedl_believable_2006,
  author    = {Mark O. Riedl and Andrew Stern},
  title     = {Believable Agents and Intelligent Scenario Direction for Social and Cultural Leadership Training},
  booktitle = {15th Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
  address   = {Baltimore, MD},
  year      = {2006},
  date      = {2006-05-01},
  url       = {http://ict.usc.edu/pubs/Believable%20Agents%20and%20Intelligent%20Scenario%20Direction%20for%20Social%20and%20Cultural%20Leadership%20Training.pdf},
  abstract  = {Simulation provides an opportunity for a trainee to practice skills in an interactive and reactive virtual environment. We present a technique for social and cultural leader training through simulation based on a combination of interactive synthetic agents and intelligent scenario direction and adaptation. Social simulation through synthetic characters provides an engaging and believable experience for the trainee. In addition, the trainee is exposed to a sequence of relevant learning situations where the trainee can practice problem-solving under particular conditions. An Automated Scenario Director provides high-level guidance to semi-autonomous character agents to coerce the trainee's experience to conform to a given scenario. When the trainee performs actions in the virtual world that cause the simulation state to deviate from the scenario, the Automated Scenario Director adapts the scenario to resolve any unexpected inconsistencies, thereby preserving the trainee's perception of self control while still retaining any relevant learning situations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Marsella, Stacy C.; Gratch, Jonathan
EMA: A computational model of appraisal dynamics Proceedings Article
In: Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion, Vienna, Austria, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{marsella_ema_2006,
  author    = {Stacy C. Marsella and Jonathan Gratch},
  title     = {EMA: A computational model of appraisal dynamics},
  booktitle = {Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion},
  address   = {Vienna, Austria},
  year      = {2006},
  date      = {2006-04-01},
  url       = {http://ict.usc.edu/pubs/EMA-%20A%20computational%20model%20of%20appraisal%20dynamics.pdf},
  abstract  = {A computational model of emotion must explain both the rapid dynamics of some emotional reactions as well as the slower responses that follow deliberation. This is often addressed by positing multiple appraisal processes such as fast pattern directed vs. slower deliberative appraisals. In our view, this confuses appraisal with inference. Rather, we argue for a single and automatic appraisal process that operates over a person’s interpretation of their relationship to the environment. Dynamics arise from perceptual and inferential processes operating on this interpretation (including deliberative and reactive processes). We illustrate this perspective through the computational modeling of a naturalistic emotional situation.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Traum, David
Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents Book Section
In: Modeling Communication with Robots and Virtual Humans, pp. 296–309, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{traum_talking_2006,
  author    = {David Traum},
  title     = {Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents},
  booktitle = {Modeling Communication with Robots and Virtual Humans},
  pages     = {296–309},
  year      = {2006},
  date      = {2006-04-01},
  url       = {http://ict.usc.edu/pubs/Talking%20to%20Virtual%20Humans.pdf},
  abstract  = {Virtual Humans are artificial characters who look and act like humans, but inhabit a simulated environment. One important aspect of many virtual humans is their communicative dialogue ability. In this paper we outline a methodology for study of dialogue behavior and construction of virtual humans. We also consider three architectures for different types of virtual humans that have been built at the Institute for Creative Technologies.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
}
Bolas, Mark; Pair, Jarrell; Haynes, Kip; McDowall, Ian
Display Research at the University of Southern California Proceedings Article
In: IEEE Emerging Displays Workshop, Alexandria, VA, 2006.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{bolas_display_2006,
  author    = {Mark Bolas and Jarrell Pair and Kip Haynes and Ian McDowall},
  title     = {Display Research at the University of Southern California},
  booktitle = {IEEE Emerging Displays Workshop},
  address   = {Alexandria, VA},
  year      = {2006},
  date      = {2006-03-01},
  url       = {http://ict.usc.edu/pubs/Display%20Research%20at%20the%20University%20of%20Southern%20California.pdf},
  abstract  = {The University of Southern California and its collaborative research partner, Fakespace Labs, are participating in a number of research programs to invent and implement new forms of display technologies for immersive and semi-immersive applications. This paper briefly describes three of these technologies and highlights a few emerging results from those efforts. The first system is a rear projected 300 degree field of view cylindrical display. It is driven by 11 projectors with geometry correction and edge blending hardware. A full scale prototype will be completed in March 2006. The second system is a 14 screen projected panoramic room environment used as an advanced teaching and meeting space. It can be driven by a cluster of personal computers or low-cost DVD players, or driven by a single personal computer. The third is a prototype stereoscopic head mounted display that can be worn in a fashion similar to standard dust protection goggles. It provides a field of view in excess of 150 degrees.},
  keywords  = {MxR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Rizzo, Albert; Pair, Jarrell; Graap, Ken; Manson, Brian; McNerney, Peter J.; Wiederhold, Brenda K.; Wiederhold, Mark; Spira, James
A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment Proceedings Article
In: NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment},
author = {Albert Rizzo and Jarrell Pair and Ken Graap and Brian Manson and Peter J. McNerney and Brenda K. Wiederhold and Mark Wiederhold and James Spira},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Military%20Personnel%20with%20Post%20Traumatic%20Stress%20Disorder-%20From%20Training%20to%20Toy%20to%20Treatment.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder},
abstract = {Post Traumatic Stress Disorder is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of a Virtual Iraq PTSD VR application that has been created from the virtual assets that were initially developed for a combat tactical training simulation, which then served as the inspiration for the X-Box game entitled Full Spectrum Warrior.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Allen, Brian; Dautricourt, Matthieu; Treskunov, Anton; Liewer, Matt; Graap, Ken; Reger, Greg; Rizzo, Albert
A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the IEEE VR 2006 Conference, pp. 64–71, Alexandria, VA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{pair_virtual_2006,
  author    = {Jarrell Pair and Brian Allen and Matthieu Dautricourt and Anton Treskunov and Matt Liewer and Ken Graap and Greg Reger and Albert Rizzo},
  title     = {A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder},
  booktitle = {Proceedings of the IEEE VR 2006 Conference},
  pages     = {64–71},
  address   = {Alexandria, VA},
  year      = {2006},
  date      = {2006-03-01},
  url       = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Post%20Traumatic%20Stress%20Disorder.pdf},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-Centered tests with the application are currently underway at the Naval Medical Center–San Diego and within an Army Combat Stress Control Team in Iraq with clinical trials scheduled to commence in February 2006.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Miller, Karen J.; Parsons, Thomas D.; Whybrow, Peter C.; Herle, Katja; Rasgon, Natalie; Herle, Andre; Martinez, Dorothy; Silverman, Dan H.; Bauer, Michael
Memory Improvement with Treatment of Hypothyroidism Journal Article
In: International Journal of Neuroscience, vol. 116, no. 8, pp. 895–906, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{miller_memory_2006,
title = {Memory Improvement with Treatment of Hypothyroidism},
author = {Karen J. Miller and Thomas D. Parsons and Peter C. Whybrow and Katja Herle and Natalie Rasgon and Andre Herle and Dorothy Martinez and Dan H. Silverman and Michael Bauer},
url = {http://ict.usc.edu/pubs/Memory%20Improvement%20with%20Treatment%20of%20Hypothyroidism.pdf},
year = {2006},
date = {2006-01-01},
journal = {International Journal of Neuroscience},
volume = {116},
number = {8},
pages = {895–906},
abstract = {The consequences of inadequate thyroid hormone availability to the brain and treatment effects of levothyroxine function are still poorly understood. This study prospectively assessed the effects of thyroid replacement therapy on cognitive function in patients suffering from biochemical evidenced, untreated hypothyroidism. Significant effects between the untreated hypothyroid group and control group were limited to verbal memory retrieval. When assessing the effects of 3-month treatment, results revealed that the treated hypothyroid group had significant increased verbal memory retrieval. Results suggest that specific memory retrieval deficits associated with hypothyroidism can resolve after replacement therapy with levothyroxine.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Toward Virtual Humans Journal Article
In: AI Magazine, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{swartout_toward_2006,
  author    = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Stacy C. Marsella and Jeff Rickel and David Traum},
  title     = {Toward Virtual Humans},
  journal   = {AI Magazine},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Toward%20Virtual%20Humans.pdf},
  abstract  = {This paper describes the virtual humans developed as part of the Mission Rehearsal Exercise project, a virtual reality-based training system. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. We describe the key capabilities of the virtual humans, including task representation and reasoning, natural language dialogue, and emotion reasoning, and show how these capabilities are integrated to provide more human-level intelligence than would otherwise be possible.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Mao, Wenji; Gratch, Jonathan
Evaluating a Computational Model of Social Causality and Responsibility Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Hakodate, Japan, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_evaluating_2006,
  author    = {Wenji Mao and Jonathan Gratch},
  title     = {Evaluating a Computational Model of Social Causality and Responsibility},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Hakodate, Japan},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20Computational%20Model%20of%20Social%20Causality%20and%20Responsibility.pdf},
  abstract  = {Intelligent agents are typically situated in a social environment and must reason about social cause and effect. Such reasoning is qualitatively different from physical causal reasoning that underlies most intelligent systems. Modeling social causal reasoning can enrich the capabilities of multi-agent systems and intelligent user interfaces. In this paper, we empirically evaluate a computational model of social causality and responsibility against human social judgments. Results from our experimental studies show that in general, the model's predictions of internal variables and inference process are consistent with human responses, though they also suggest some possible refinement to the computational model.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Soo-Min; Hovy, Eduard
Identifying and Analyzing Judgment Opinions Proceedings Article
In: Proceedings of the Human Language Technology/North American Association of Computational Linguistics Conference, New York, NY, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{kim_identifying_2006,
title = {Identifying and Analyzing Judgment Opinions},
author = {Soo-Min Kim and Eduard Hovy},
url = {http://ict.usc.edu/pubs/Identifying%20and%20Analyzing%20Judgment%20Opinions.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the Human Language Technology/North American Association of Computational Linguistics Conference},
address = {New York, NY},
abstract = {In this paper, we introduce a methodology for analyzing judgment opinions. We define a judgment opinion as consisting of a valence, a holder, and a topic. We decompose the task of opinion analysis into four parts: 1) recognizing the opinion; 2) identifying the valence; 3) identifying the holder; and 4) identifying the topic. In this paper, we address the first three parts and evaluate our methodology using both intrinsic and extrinsic measures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Clevenger, William
A Culturally-enhanced Environmental Framework for Virtual Environments Proceedings Article
In: Proceedings of Behavior Representation in Modeling and Simulation, Baltimore, MD, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{mcalinden_culturally-enhanced_2006,
  author    = {Ryan McAlinden and William Clevenger},
  title     = {A Culturally-enhanced Environmental Framework for Virtual Environments},
  booktitle = {Proceedings of Behavior Representation in Modeling and Simulation},
  address   = {Baltimore, MD},
  year      = {2006},
  date      = {2006-01-01},
  url       = {http://ict.usc.edu/pubs/A%20Culturally-enhanced%20Environmental%20Framework%20for%20Virtual%20Environments.pdf},
  abstract  = {This paper details the design and implementation of an embedded environmental framework that introduces cultural and social influences into a simulation agent's decision-making process. We describe the current limitations associated with accurately representing culture in virtual environments and military simulations, and how recent research in other academic fields have enabled computational techniques to begin incorporating the effects of culture into AI and behavior subsystems. The technical approach is presented that describes the design and implementation of a hierarchical data model, as well as the software techniques for embedding culturally-specific information inside of a virtual environment. Finally, future work is discussed for developing a more comprehensive and standardized approach for embedding this culturally-specific information inside of the virtual domain.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Braaten, Alyssa J.; Parsons, Thomas D.; McCue, Robert; Sellers, Alfred; Burns, William J.
Neurocognitive Differential Diagnosis of Dementing Diseases: Alzheimer's Dementia, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder Journal Article
In: International Journal of Neuroscience, vol. 116, pp. 1271–1293, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{braaten_neurocognitive_2006,
title = {Neurocognitive Differential Diagnosis of Dementing Diseases: Alzheimer's Dementia, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder},
author = {Alyssa J. Braaten and Thomas D. Parsons and Robert McCue and Alfred Sellers and William J. Burns},
url = {http://ict.usc.edu/pubs/NEUROCOGNITIVE%20DIFFERENTIAL%20DIAGNOSIS%20OF%20DEMENTING%20DISEASES-%20ALZHEIMER%E2%80%99S%20DEMENTIA,%20VASCULAR%20DEMENTIA,%20FRONTOTEMPORAL%20DEMENTIA,%20AND%20MAJOR%20DEPRESSIVE%20DISORDER.pdf},
year = {2006},
date = {2006-01-01},
journal = {International Journal of Neuroscience},
volume = {116},
pages = {1271–1293},
abstract = {Similarities in presentation of Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder, pose differential diagnosis challenges. The current study identifies specific neuropsychological patterns of scores for Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder. Neuropsychological domains directly assessed in the study included: immediate memory, delayed memory, confrontational naming, verbal fluency, attention, concentration, and executive functioning. The results reveal specific neuropsychological comparative profiles for Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder. The identification of these profiles will assist in the differential diagnosis of these disorders and aid in patient treatment.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad; Core, Mark; Gomboc, Dave; Solomon, Steve; Lent, Michael; Rosenberg, Milton
Reflective Tutoring for Immersive Simulation Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Tutoring Systems, Jhongli, Taiwan, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_reflective_2006,
  title     = {Reflective Tutoring for Immersive Simulation},
  author    = {H. Chad Lane and Mark Core and Dave Gomboc and Steve Solomon and Michael van Lent and Milton Rosenberg},
  url       = {http://ict.usc.edu/pubs/Reflective%20Tutoring%20for%20Immersive%20Simulation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 8th International Conference on Intelligent Tutoring Systems},
  address   = {Jhongli, Taiwan},
  abstract  = {Reflection is critically important for time-constrained training simulations that do not permit extensive tutor-student interactions during an exercise. Here, we describe a reflective tutoring system for a virtual human simulation of negotiation. The tutor helps students review their exercise, elicits where and how they could have done better, and uses explainable artificial intelligence (XAI) to allow students the chance to ask questions about the virtual human's behavior.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; Pair, Jarrell; Reger, Greg; Treskunov, Anton; Parsons, Thomas D.
User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder Proceedings Article
In: Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology, Esbjerg, Denmark, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_user-centered_2006,
  title     = {User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder},
  author    = {Albert Rizzo and Ken Graap and Jarrell Pair and Greg Reger and Anton Treskunov and Thomas D. Parsons},
  url       = {http://ict.usc.edu/pubs/User-centered%20design%20driven%20development%20of%20a%20virtual%20reality%20therapy%20application%20for%20Iraq%20war%20combat-related%20post%20traumatic%20stress%20disorder.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology},
  address   = {Esbjerg, Denmark},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Buxbaum, Laurel J.; Palermo, Maryann; Mastrogiovanni, Dina; Read, Mary Schmidt; Rosenberg-Pitonyak, Ellen; Rizzo, Albert; Coslett, H. Branch
Assessment of Spatial Neglect with a Virtual Wheelchair Navigation Task Proceedings Article
In: 5th Annual International Workshop on Virtual Rehabilitation, New York, NY, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{buxbaum_assessment_2006,
  title     = {Assessment of Spatial Neglect with a Virtual Wheelchair Navigation Task},
  author    = {Laurel J. Buxbaum and Maryann Palermo and Dina Mastrogiovanni and Mary Schmidt Read and Ellen Rosenberg-Pitonyak and Albert Rizzo and H. Branch Coslett},
  url       = {http://ict.usc.edu/pubs/Assessment%20of%20Spatial%20Neglect%20with%20a%20Virtual%20Wheelchair%20Navigation%20Task.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Annual International Workshop on Virtual Rehabilitation},
  address   = {New York, NY},
  abstract  = {We report data from 9 participants with right hemisphere stroke on a new virtual reality (VR) wheelchair navigation test designed to assess lateralized spatial attention and neglect. The test consists of a virtual winding path along which participants must navigate (or be navigated by an experimenter) as they name objects encountered along the way. There are 4 VR task conditions, obtained by crossing the factors array complexity (Simple, Complex) and Driver (Participant, Experimenter). Participants performed the VR task, a real-life wheelchair navigation task, and a battery of tests assessing arousal, visual attention under secondary task demands, and neglect. The VR test showed sensitivity to both array complexity and driver, with best performance occurring in the Experimenter Navigated, Simple Array condition. The VR test also showed high correlations with the wheelchair navigation test, and these correlations were in many instances higher than those between traditional clinical neglect tests and the wheelchair navigation task. Moreover, the VR test detected lateralized attention deficits in participants whose performance was within the normal range on other neglect tests. We conclude that the VR task is sensitive to factors likely to affect the severity of neglect in the daily environment, and shows promise as an efficient, easily administered measure of real-life wheelchair navigation.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Traum, David
Improving Question-Answering With Linking Dialogues Proceedings Article
In: International Conference on Intelligent User Interfaces (IUI-2006), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_improving_2006,
  title     = {Improving Question-Answering With Linking Dialogues},
  author    = {Sudeep Gandhe and Andrew S. Gordon and David Traum},
  url       = {http://ict.usc.edu/pubs/Improving%20Question-Answering%20With%20Linking%20Dialogues%20.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {International Conference on Intelligent User Interfaces (IUI-2006)},
  address   = {Sydney, Australia},
  abstract  = {Question-answering dialogue systems have found many applications in interactive learning environments. This paper is concerned with one such application for Army leadership training, where trainees input free-text questions that elicit pre-recorded video responses. Since these responses are already crafted before the question is asked, a certain degree of incoherence exists between the question that is asked and the answer that is given. This paper explores the use of short linking dialogues that stand in between the question and its video response to alleviate the problem of incoherence. We describe a set of experiments with human generated linking dialogues that demonstrate their added value. We then describe our implementation of an automated method for utilizing linking dialogues and show that these have better coherence properties than the original system without linking dialogues.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Mao, Wenji
Towards a Validated Model of "Emotional Intelligence" Proceedings Article
In: Proceedings of the 21st National Conference on Artificial Intelligence, pp. 1613–1616, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2006,
  title     = {Towards a Validated Model of "Emotional Intelligence"},
  author    = {Jonathan Gratch and Stacy C. Marsella and Wenji Mao},
  url       = {http://ict.usc.edu/pubs/Towards%20a%20Validated%20Model%20of%20Emotional%20Intelligence.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 21st National Conference on Artificial Intelligence},
  volume    = {2},
  pages     = {1613--1616},
  address   = {Boston, MA},
  abstract  = {This article summarizes recent progress in developing a validated computational account of the cognitive antecedents and consequences of emotion. We describe the potential of this work to impact a variety of AI problem domains.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tröster, Alexander I.; Parsons, Thomas D.
Sodium Amytal Testing and Language Journal Article
In: Encyclopedia of Language and Linguistics, vol. 11, pp. 500–503, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{troster_sodium_2006,
  title     = {Sodium Amytal Testing and Language},
  author    = {Alexander I. Tröster and Thomas D. Parsons},
  url       = {http://ict.usc.edu/pubs/Sodium%20Amytal%20Testing%20and%20Language.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Encyclopedia of Language and Linguistics},
  volume    = {11},
  pages     = {500--503},
  abstract  = {The intracarotid amobarbital test (IAT) was first described by Juhn Wada and thus is often referred to as the 'Wada test.' Wada originally developed this technique to study the interhemispheric spread of epileptiform discharges in patients undergoing unilateral electroconvulsive therapy. Based on his observation that an expressive aphasia resulted when the language dominant hemisphere was injected with amobarbital, he reasoned that this technique might be useful in determining hemispheric language dominance in neurosurgical candidates (and thus minimize speech and language dysfunction in patients undergoing dominant hemisphere surgery).},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2006, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{tariq_efficient_2006,
  title       = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting},
  author      = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
  url         = {http://ict.usc.edu/pubs/ICT-TR-01-2006.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 01 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present an image-based technique to rapidly acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set contains structured light and is used to obtain face geometry. We match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Nelson, Nathaniel W.; Parsons, Thomas D.; Grote, Christopher L.; Smith, Clifford A.; Sisung, James R. II
The MMPI-2 Fake Bad Scale: Concordance and Specificity of True and Estimated Scores Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 28, pp. 1–12, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{nelson_mmpi-2_2006,
  title     = {The {MMPI-2} Fake Bad Scale: Concordance and Specificity of True and Estimated Scores},
  author    = {Nathaniel W. Nelson and Thomas D. Parsons and Christopher L. Grote and Clifford A. Smith and Sisung, II, James R.},
  url       = {http://ict.usc.edu/pubs/The%20MMPI-2%20Fake%20Bad%20Scale-%20Concordance%20and%20Specificity%20of%20True%20and%20Estimated%20Scores.pdf},
  doi       = {10.1080/13803390490919272},
  issn      = {1380-3395},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Journal of Clinical and Experimental Neuropsychology},
  volume    = {28},
  pages     = {1--12},
  abstract  = {A number of recent studies have supported the use of the MMPI-2 Fake Bad Scale (FBS) as a measure of negative response bias, the scale at times demonstrating greater sensitivity to negative response bias than other MMPI-2 validity scales. However, clinicians may not always have access to True FBS (T-FBS) scores, such as when True-False answer sheets are unavailable or published research studies do not report FBS raw scores. Under these conditions, Larrabee (2003a) suggests a linear regression formula that provides estimated FBS (E-FBS) scores derived from weighted validity and clinical T-Scores. The present study intended to validate this regression formula of MMPI-2 E-FBS scores and demonstrate its specificity in a sample of non-litigating, clinically referred, medically intractable epilepsy patients. We predicted that the E-FBS scores would correlate highly ($>$.70) with the T-FBS scores, that the E-FBS would show comparable correlations with MMPI-2 validity and clinical scales relative to the T-FBS, and that the E-FBS would show an adequate ability to match T-FBS scores using a variety of previously suggested T-FBS raw score cutoffs. Overall, E-FBS scores correlated very highly with T-FBS scores (r = .78, p $<$ .0001), though correlations were especially high for women (r = .85, p $<$ .0001) compared to men (r = .62, p $<$ .001). Thirty-one of 32 (96.9%) comparisons made between E-FBS/T-FBS correlates with other MMPI-2 scales were nonsignificant. When matching to T-FBS "high" and "low" scores, the E-FBS scores demonstrated the highest hit rate (92.5%) through use of Lees-Haley's (1992) revised cutoffs for men and women. These same cutoffs resulted in excellent overall specificity for both the T-FBS scores (92.5%) and E-FBS scores (90.6%). The authors conclude that the E-FBS represents an adequate estimate of T-FBS scores in the current epilepsy sample. Use of E-FBS scores may be especially useful when clinicians conduct the MMPI-2 short form, which does not include all of the 43 FBS items but does include enough items to compute each of the validity and clinical T-Scores. Future studies should examine E-FBS sensitivity in compensation-seekers with incomplete effort.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Robertson, R. Kevin; Parsons, Thomas D.; Sidtis, John J.; Inman, Tina Hanlon; Robertson, Wendy T.; Hall, Colin D.; Price, Richard W.
Timed Gait Test: Normative Data for the Assessment of the AIDS Dementia Complex Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 28, pp. 1053–1064, 2006, ISSN: 1380-3395.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_timed_2006,
  title     = {Timed Gait Test: Normative Data for the Assessment of the {AIDS} Dementia Complex},
  author    = {R. Kevin Robertson and Thomas D. Parsons and John J. Sidtis and Tina Hanlon Inman and Wendy T. Robertson and Colin D. Hall and Richard W. Price},
  url       = {http://ict.usc.edu/pubs/Timed%20Gait%20Test-%20Normative%20Data%20for%20the%20Assessment%20of%20the%20AIDS%20Dementia%20Complex.pdf},
  doi       = {10.1080/13803390500205684},
  issn      = {1380-3395},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Journal of Clinical and Experimental Neuropsychology},
  volume    = {28},
  pages     = {1053--1064},
  abstract  = {The Timed Gait test is a standardized procedure assessing motor dysfunction of lower extremities and gait abnormalities associated with AIDS dementia complex. Heretofore, interpretations of Timed Gait results have been hampered by the lack of normative data. We provide results on this test derived from 1,549 subjects (HIV-seronegatives (HIV-) and seropositives (HIV+) classified according to ADC stage). Timed Gait was found to be a useful screening and assessment tool for evaluating ADC and correlated with clinical ADC staging as well as more extensive structured neurological and neuropsychological evaluations. Analysis of covariance results (with age and education as covariates) revealed symptomatic HIV+(SX) and AIDS groups having significantly slower Timed Gait scores than those in the HIV– and asymptomatic HIV+(ASX) groups. The SX group obtained significantly slower timed gait scores than those in the AIDS group. There was a significant increase in Timed Gait scores with each increase in dementia staging with the HIV- subjects having the fastest mean Timed Gait scores and the HIV+ dementia stage 2+ having the slowest. These normative data should prove useful in both recognition of ADC and treatment response. Given its minimal training requirements, the Timed Gait would have utility in resource limited settings.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Peers, Pieter; Hawkins, Tim; Debevec, Paul
A Reflective Light Stage Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2006, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{peers_reflective_2006,
  title       = {A Reflective Light Stage},
  author      = {Pieter Peers and Tim Hawkins and Paul Debevec},
  url         = {http://ict.usc.edu/pubs/ICT-TR-04.2006.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 04 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present a novel acquisition device to capture high resolution 4D reflectance fields of real scenes. The device consists of a concave hemispherical surface coated with a rough specular paint and a digital video projector with a fish-eye lens positioned near the center of the hemisphere. The scene is placed near the projector, also near the center, and photographed from a fixed vantage point. The projector projects a high-resolution image of incident illumination which is reflected by the rough hemispherical surface to become the illumination on the scene. We demonstrate the utility of this device by capturing a high resolution hemispherical reflectance field of a specular object which would be difficult to capture using previous acquisition techniques.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
van der Werf, R. J.
Creating Rapport with Virtual Humans Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2006, 2006.
Abstract | Links | BibTeX | Tags:
@techreport{van_der_werf_creating_2006,
  title       = {Creating Rapport with Virtual Humans},
  author      = {R. J. van der Werf},
  url         = {http://ict.usc.edu/pubs/ICT-TR.02.2006-Rick.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 02 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {This report describes the internship about the assignment Creating Rapport with Virtual Humans. The assignment is split up into two separate parts. The first part is to improve the visual feature detection of the current mimicking system [MAA04]. This is going to be done using a Computer Vision approach. Together with two other interns [LAM05] the whole mimicking system was improved, leading to a new Rapport system. The second part involves subject testing with the newly developed system. Firstly the goal is to make a working system that can be reused and expanded in the future. Secondly the goal is to use the data from the subject test to determine if rapport can be created with Virtual Humans. The resulting Rapport system should be a very well reusable and expandable system. This system makes it possible for other people, unfamiliar with the system, to easily use the system for future testing. Unfortunately too little data was obtained with subject testing to give a solid conclusion whether or not creating rapport with Virtual Humans is possible. The subject testing did lead to an improved testing procedure which makes future testing quite easy.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Parsons, Thomas D.; Kratz, K. M.; Thompson, E.; Stanczyk, F. Z.; Buckwalter, John Galen
DHEA Supplementation and Cognition in Postmenopausal Women Journal Article
In: International Journal of Neuroscience, vol. 116, pp. 141–155, 2006, ISSN: 0020-7454.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_dhea_2006,
  title     = {{DHEA} Supplementation and Cognition in Postmenopausal Women},
  author    = {Thomas D. Parsons and K. M. Kratz and E. Thompson and F. Z. Stanczyk and John Galen Buckwalter},
  url       = {http://ict.usc.edu/pubs/DHEA%20Supplementation%20and%20Cognition%20in%20Postmenopausal%20Women.pdf},
  doi       = {10.1080/00207450500341506},
  issn      = {0020-7454},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {International Journal of Neuroscience},
  volume    = {116},
  pages     = {141--155},
  abstract  = {Previous work has suggested that DHEA supplementation may have adverse cognitive effects in elderly women. This article analyzed 24-h measurements of DHEA, DHEAS, and cortisol to determine if cognitive decrease with treatment is mediated by DHEA’s impact on endogenous cortisol. It was found that DHEA administration increased cortisol at several hours during the day. In the treatment group, cortisol was positively associated with cognition at study completion. An increase in negative associations between DHEA(S) levels and cognition was found at completion. Increased cortisol does not explain the cognitive deficits associated with DHEA, suggesting a direct negative effect of exogenous DHEA on cognition.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Roque, Antonio; Ai, Hua; Traum, David
Evaluation of an Information State-Based Dialogue Manager Proceedings Article
In: Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue, Potsdam, Germany, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_evaluation_2006,
  title     = {Evaluation of an Information State-Based Dialogue Manager},
  author    = {Antonio Roque and Hua Ai and David Traum},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Information%20State-Based%20Dialogue%20Manager.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue},
  address   = {Potsdam, Germany},
  abstract  = {We describe an evaluation of an information state-based dialogue manager by measuring its accuracy in information state component updating.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Dillenbourg, Pierre; Traum, David
Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving Journal Article
In: The Journal of the Learning Sciences, vol. 15, no. 1, pp. 121–151, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{dillenbourg_sharing_2006,
  title     = {Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving},
  author    = {Pierre Dillenbourg and David Traum},
  url       = {http://ict.usc.edu/pubs/Sharing%20Solutions-%20Persistence%20and%20Grounding%20in%20Multimodal%20Collaborative%20Problem%20Solving.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {The Journal of the Learning Sciences},
  volume    = {15},
  number    = {1},
  pages     = {121--151},
  abstract  = {This article reports on an exploratory study of the relationship between grounding and problem solving in multimodal computer-mediated collaboration. This article examines two different media, a shared whiteboard and a MOO environment that includes a text chat facility. A study was done on how the acknowledgment rate (how often partners give feedback of having perceived, understood, and accepted partner's contributions) varies according to the media and the content of interactions. It was expected that the whiteboard would serve to draw schemata that disambiguate chat utterances. Instead, results show that the whiteboard is primarily used to represent the state of problem solving and the chat is used for grounding information created on the whiteboard. These results are interpreted in terms of persistence: More persistent information is exchanged through the more persistent medium. The whiteboard was used as a shared memory rather than a grounding tool.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Lindheim, Richard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation Book Section
In: Multimodal Intelligent Information Presentation, vol. 27, pp. 305–321, Springer, Netherlands, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{swartout_simulation_2006,
  title     = {Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation},
  author    = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Richard Lindheim and Stacy C. Marsella and Jeff Rickel and David Traum},
  url       = {http://ict.usc.edu/pubs/SIMULATION%20MEETS%20HOLLYWOOD-%20Integrating%20Graphics,%20Sound,%20Story%20and%20Character%20for%20Immersive%20Simulation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Multimodal Intelligent Information Presentation},
  volume    = {27},
  pages     = {305--321},
  publisher = {Springer},
  address   = {Netherlands},
  abstract  = {The Institute for Creative Technologies was created at the University of Southern California with the goal of bringing together researchers in simulation technology to collaborate with people from the entertainment industry. The idea was that much more compelling simulations could be developed if researchers who understood state-of-the-art simulation technology worked together with writers and directors who knew how to create compelling stories and characters. This paper presents our first major effort to realize that vision, the Mission Rehearsal Exercise Project, which confronts a soldier trainee with the kinds of dilemmas he might reasonably encounter in a peacekeeping operation. The trainee is immersed in a synthetic world and interacts with virtual humans: artificially intelligent and graphically embodied conversational agents that understand and generate natural language, reason about world events and respond appropriately to the trainee's actions or commands. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have also joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. In this paper we describe the Mission Rehearsal Exercise system and the insights gained through this large-scale integration.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Rosenbloom, Paul
A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond Journal Article
In: Tutorials in Quantitative Methods for Psychology, vol. 2, no. 2, pp. 43–51, 2006.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Virtual Humans
@article{rosenbloom_cognitive_2006,
  title     = {A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond},
  author    = {Paul Rosenbloom},
  url       = {http://ict.usc.edu/pubs/A%20Cognitive%20Odyssey-%20From%20the%20Power%20Law%20of%20Practice%20to%20a%20General%20Learning%20Mechanism%20and%20Beyond.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Tutorials in Quantitative Methods for Psychology},
  volume    = {2},
  number    = {2},
  pages     = {43--51},
  abstract  = {This article traces a line of research that began with the establishment of a pervasive regularity in human performance – the Power Law of Practice – and proceeded through several decades' worth of investigations that this opened up into learning and cognitive architecture. The results touch on both cognitive psychology and artificial intelligence, and more specifically on the possibility of building general learning mechanisms/systems. It is a story whose final chapter is still to be written.},
  keywords  = {CogArch, Cognitive Architecture, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Feintuch, Uri; Raz, Liat; Hwang, Jane; Josman, Naomi; Katz, Noomi; Kizony, Rachel; Rand, Debbie; Rizzo, Albert; Shahar, Meir; Jang, Yongseok; Weiss, Patrice L. (Tamar)
Integrating haptic-tactile feedback into a video capture based VE for rehabilitation Journal Article
In: CyberPsychology and Behavior, vol. 9, no. 2, pp. 129–132, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{feintuch_integrating_2006,
  title     = {Integrating haptic-tactile feedback into a video capture based {VE} for rehabilitation},
  author    = {Uri Feintuch and Liat Raz and Jane Hwang and Naomi Josman and Noomi Katz and Rachel Kizony and Debbie Rand and Albert Rizzo and Meir Shahar and Yongseok Jang and Patrice L. (Tamar) Weiss},
  url       = {http://ict.usc.edu/pubs/Integrating%20Haptic-Tactile%20Feedback%20into%20a%20Video-Capture%E2%80%93Based%20Virtual%20Environment%20for%20Rehabilitation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {CyberPsychology and Behavior},
  volume    = {9},
  number    = {2},
  pages     = {129--132},
  abstract  = {Video-capture virtual reality (VR) systems are gaining popularity as intervention tools. To date, these platforms offer visual and audio feedback but do not provide haptic feedback. We contend that adding haptic feedback may enhance the quality of intervention for various theoretical and empirical reasons. This study aims to integrate haptic-tactile feedback into a video capture system (GX VR), which is currently applied for rehabilitation. The proposed multi-modal system can deliver audio-visual as well as vibrotactile feedback. The latter is provided via small vibratory discs attached to the patient's limbs. This paper describes the system, the guidelines of its design, and the ongoing usability study.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Robertson, R. Kevin; Parsons, Thomas D.; Horst, Charles; Hall, Colin D.
Thoughts of death and suicidal ideation in nonpsychiatric human immunodeficiency virus seropositive individuals Journal Article
In: Death Studies, vol. 30, pp. 455–469, 2006, ISSN: 0748-1187.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_thoughts_2006,
  title     = {Thoughts of death and suicidal ideation in nonpsychiatric human immunodeficiency virus seropositive individuals},
  author    = {R. Kevin Robertson and Thomas D. Parsons and Charles Horst and Colin D. Hall},
  url       = {http://ict.usc.edu/pubs/THOUGHTS%20OF%20DEATH%20AND%20SUICIDAL%20IDEATION%20IN%20NONPSYCHIATRIC%20HUMAN%20IMMUNODEFICIENCY%20VIRUS%20SEROPOSITIVE%20INDIVIDUALS.pdf},
  doi       = {10.1080/07481180600614435},
  issn      = {0748-1187},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Death Studies},
  volume    = {30},
  pages     = {455--469},
  abstract  = {The present study examines the prevalence of death thoughts and suicidality in HIV infection. Subjects (n = 246) were examined for psychiatric morbidity and suicidality. Compared to high risk HIV seronegatives, HIV seropositives (HIV+) had significantly increased frequency and severity of both suicidal ideation and death thoughts. Two-thirds of seropositives had suicidal ideation at some point; half of the seropositives reported suicide plans and one quarter suicide attempts; and a third of seropositives reported current suicidal ideation. Suicidal ideation did not increase with advancing disease. The high prevalence of suicidal ideation suggests inclusion of its assessment in HIV treatment regardless of stage.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}