Publications
Search
Traum, David
Using Dialogue System Technology to Support Interactive History Learning Journal Article
In: Journal of Japan Society of Artificial Intelligence (JSAI), vol. 31, no. 6, pp. 806, 2016.
@article{traum_using_2016,
  title     = {Using Dialogue System Technology to Support Interactive History Learning},
  author    = {David Traum},
  url       = {http://www.ai-gakkai.or.jp/en/en/vol31_no6/},
  year      = {2016},
  date      = {2016-11-01},
  journal   = {Journal of Japan Society of Artificial Intelligence (JSAI)},
  volume    = {31},
  number    = {6},
  pages     = {806},
  abstract  = {We describe the use of spoken dialogue technology to enhance informal history learning. We describe several uses for this technology, including allowing learners to engage in natural interactions at a historical site, allowing learners to talk with recreations of historical figures, and using oral history recordings of a witness to create a dialogue experience. Two projects are highlighted, one to give a guided experience of a historical location, and another, New Dimensions in Testimony, that allows an experience similar to face to face conversation with a Holocaust survivor. These techniques allow many of the benefits of an intimate connection to historical places and people, through direct interaction and user initiative, but can also be delivered to a mass audience, formerly only reachable by broadcast, non-interactive media.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ronald; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis Philippe
Detection and computational analysis of psychological signals using a virtual human interviewing agent Journal Article
In: Journal of Pain Management, pp. 311–321, 2016, ISSN: 1939-5914.
@article{rizzo_detection_2016,
  title     = {Detection and computational analysis of psychological signals using a virtual human interviewing agent},
  author    = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ronald Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis Philippe Morency},
  url       = {http://www.icdvrat.org/2014/papers/ICDVRAT2014_S03N3_Rizzo_etal.pdf},
  issn      = {1939-5914},
  year      = {2016},
  date      = {2016-11-01},
  journal   = {Journal of Pain Management},
  pages     = {311--321},
  abstract  = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded "Detection and Computational Analysis of Psychological Signals" project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals longitudinally that can be used to inform diagnostic assessment within a clinical context.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Pestian, John P.; Sorter, Michael; Connolly, Brian; Cohen, Kevin Bretonnel; McCullumsmith, Cheryl; Gee, Jeffry T.; Morency, Louis-Philippe; Scherer, Stefan; Rohlfs, Lesley
A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial Journal Article
In: Suicide and Life-Threatening Behavior, 2016, ISSN: 03630234.
@article{pestian_machine_2016,
  title     = {A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial},
  author    = {John P. Pestian and Michael Sorter and Brian Connolly and Kevin Bretonnel Cohen and Cheryl McCullumsmith and Jeffry T. Gee and Louis-Philippe Morency and Stefan Scherer and Lesley Rohlfs},
  url       = {http://doi.wiley.com/10.1111/sltb.12312},
  doi       = {10.1111/sltb.12312},
  issn      = {0363-0234},
  year      = {2016},
  date      = {2016-11-01},
  journal   = {Suicide and Life-Threatening Behavior},
  abstract  = {Death by suicide demonstrates profound personal suffering and societal failure. While basic sciences provide the opportunity to understand biological markers related to suicide, computer science provides opportunities to understand suicide thought markers. In this novel prospective, multimodal, multicenter, mixed demographic study, we used machine learning to measure and fuse two classes of suicidal thought markers: verbal and nonverbal. Machine learning algorithms were used with the subjects’ words and vocal characteristics to classify 379 subjects recruited from two academic medical centers and a rural community hospital into one of three groups: suicidal, mentally ill but not suicidal, or controls. By combining linguistic and acoustic characteristics, subjects could be classified into one of the three groups with up to 85% accuracy. The results provide insight into how advanced technology can be used for suicide assessment and prevention.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Neubauer, Catherine; Woolley, Joshua; Khooshabeh, Peter; Scherer, Stefan
Getting to know you: a multimodal investigation of team behavior and resilience to stress Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 193–200, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{neubauer_getting_2016,
  title     = {Getting to know you: a multimodal investigation of team behavior and resilience to stress},
  author    = {Catherine Neubauer and Joshua Woolley and Peter Khooshabeh and Stefan Scherer},
  url       = {http://dl.acm.org/citation.cfm?doid=2993148.2993195},
  doi       = {10.1145/2993148.2993195},
  isbn      = {978-1-4503-4556-9},
  year      = {2016},
  date      = {2016-11-01},
  booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
  pages     = {193--200},
  publisher = {ACM Press},
  address   = {Tokyo, Japan},
  abstract  = {Team cohesion has been suggested to be a critical factor in emotional resilience following periods of stress. Team cohesion may depend on several factors including emotional state, communication among team members and even psychophysiological response. The present study sought to employ several multimodal techniques designed to investigate team behavior as a means of understanding resilience to stress. We recruited 40 subjects to perform a cooperative-task in gender-matched, two-person teams. They were responsible for working together to meet a common goal, which was to successfully disarm a simulated bomb. This high-workload task requires successful cooperation and communication among members. We assessed several behaviors that relate to facial expression, word choice and physiological responses (i.e., heart rate variability) within this scenario. A manipulation of an “ice breaker” condition was used to induce a level of comfort or familiarity within the team prior to the task. We found that individuals in the “ice breaker” condition exhibited better resilience to subjective stress following the task. These individuals also exhibited more insight and cognitive speech, more positive facial expressions and were also able to better regulate their emotional expression during the task, compared to the control.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chollet, Mathieu; Prendinger, Helmut; Scherer, Stefan
Native vs. Non-native Language Fluency Implications on Multimodal Interaction for Interpersonal Skills Training Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 386–393, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{chollet_native_2016,
  title     = {Native vs. Non-native Language Fluency Implications on Multimodal Interaction for Interpersonal Skills Training},
  author    = {Mathieu Chollet and Helmut Prendinger and Stefan Scherer},
  url       = {http://dl.acm.org/citation.cfm?doid=2993148.2993196},
  doi       = {10.1145/2993148.2993196},
  isbn      = {978-1-4503-4556-9},
  year      = {2016},
  date      = {2016-11-01},
  booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
  pages     = {386--393},
  publisher = {ACM Press},
  address   = {Tokyo, Japan},
  abstract  = {New technological developments in the field of multimodal interaction show great promise for the improvement and assessment of public speaking skills. However, it is unclear how the experience of non-native speakers interacting with such technologies differs from native speakers. In particular, non-native speakers could benefit less from training with multimodal systems compared to native speakers. Additionally, machine learning models trained for the automatic assessment of public speaking ability on data of native speakers might not be performing well for assessing the performance of non-native speakers. In this paper, we investigate two aspects related to the performance and evaluation of multimodal interaction technologies designed for the improvement and assessment of public speaking between a population of English native speakers and a population of non-native English speakers. Firstly, we compare the experiences and training outcomes of these two populations interacting with a virtual audience system designed for training public speaking ability, collecting a dataset of public speaking presentations in the process. Secondly, using this dataset, we build regression models for predicting public speaking performance on both populations and evaluate these models, both on the population they were trained on and on how they generalize to the second population.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olszewski, Kyle; Lim, Joseph J.; Saito, Shunsuke; Li, Hao
High-fidelity facial and speech animation for VR HMDs Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 6, pp. 1–14, 2016, ISSN: 07300301.
@article{olszewski_high-fidelity_2016,
  title     = {High-fidelity facial and speech animation for {VR} {HMDs}},
  author    = {Kyle Olszewski and Joseph J. Lim and Shunsuke Saito and Hao Li},
  url       = {http://dl.acm.org/citation.cfm?doid=2980179.2980252},
  doi       = {10.1145/2980179.2980252},
  issn      = {0730-0301},
  year      = {2016},
  date      = {2016-11-01},
  journal   = {ACM Transactions on Graphics},
  volume    = {35},
  number    = {6},
  pages     = {1--14},
  abstract  = {Several significant challenges currently prohibit expressive interaction in virtual reality (VR). The occlusion introduced by modern head-mounted displays (HMDs) makes most existing techniques for facial tracking intractable in this scenario. Furthermore, even state-of-the-art techniques used for real-time facial tracking in less constrained environments fail to capture subtle details of the user’s facial expressions that are essential for compelling speech animation. We introduce a novel system for HMD users to control a digital avatar in real-time while producing plausible speech animation and emotional expressions. Using a monocular camera attached to the front of an HMD, we record video sequences from multiple subjects performing a variety of facial expressions and speaking several phonetically-balanced sentences. These images are used with artist-generated animation data corresponding to these sequences to train a convolutional neural network (CNN) to regress images of a user’s mouth region to the parameters that control a digital avatar. To make training this system more tractable, we make use of audio-based alignment techniques to map images of multiple users making the same utterance to the corresponding animation parameters. We demonstrate that our regression technique is also feasible for tracking the expressions around the user’s eye region, including the eyebrows, with an infrared (IR) camera within the HMD, thereby enabling full facial tracking. This system requires no user-specific calibration, makes use of easily obtainable consumer hardware, and produces high-quality animations of both speech and emotional expressions. Finally, we demonstrate the quality of our system on a variety of subjects and evaluate its performance against state-of-the-art realtime facial tracking techniques.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lucas, Gale; Stratou, Giota; Lieblich, Shari; Gratch, Jonathan
Trust Me: Multimodal Signals of Trustworthiness Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 5–12, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{lucas_trust_2016,
  title     = {Trust Me: Multimodal Signals of Trustworthiness},
  author    = {Gale Lucas and Giota Stratou and Shari Lieblich and Jonathan Gratch},
  url       = {http://dl.acm.org/citation.cfm?doid=2993148.2993178},
  doi       = {10.1145/2993148.2993178},
  isbn      = {978-1-4503-4556-9},
  year      = {2016},
  date      = {2016-11-01},
  booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
  pages     = {5--12},
  publisher = {ACM Press},
  address   = {Tokyo, Japan},
  abstract  = {This paper builds on prior psychological studies that identify signals of trustworthiness between two human negotiators. Unlike prior work, the current work tracks such signals automatically and fuses them into computational models that predict trustworthiness. To achieve this goal, we apply automatic trackers to recordings of human dyads negotiating in a multi-issue bargaining task. We identify behavioral indicators in different modalities (facial expressions, gestures, gaze, and conversational features) that are predictive of trustworthiness. We predict both objective trustworthiness (i.e., are they honest) and perceived trustworthiness (i.e., do they seem honest to their interaction partner). Our experiments show that people are poor judges of objective trustworthiness (i.e., objective and perceived trustworthiness are predicted by different indicators), and that multimodal approaches better predict objective trustworthiness, whereas people overly rely on facial expressions when judging the honesty of their partner. Moreover, domain knowledge (from the literature and prior analysis of behaviors) facilitates the model development process.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Artstein, Ron; Traum, David; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Niki and Julie: A Robot and Virtual Human for Studying Multimodal Social Interaction Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 402–403, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{artstein_niki_2016,
  title     = {Niki and Julie: A Robot and Virtual Human for Studying Multimodal Social Interaction},
  author    = {Ron Artstein and David Traum and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
  url       = {http://dl.acm.org/citation.cfm?doid=2993148.2998532},
  doi       = {10.1145/2993148.2998532},
  isbn      = {978-1-4503-4556-9},
  year      = {2016},
  date      = {2016-11-01},
  booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
  pages     = {402--403},
  publisher = {ACM Press},
  address   = {Tokyo, Japan},
  abstract  = {We demonstrate two agents, a robot and a virtual human, which can be used for studying factors that impact social influence. The agents engage in dialogue scenarios that build familiarity, share information, and attempt to influence a human participant. The scenarios are variants of the classical “survival task,” where members of a team rank the importance of a number of items (e.g., items that might help one survive a crash in the desert). These are ranked individually and then re-ranked following a team discussion, and the difference in ranking provides an objective measure of social influence. Survival tasks have been used in psychology, virtual human research, and human-robot interaction. Our agents are operated in a “Wizard-of-Oz” fashion, where a hidden human operator chooses the agents’ dialogue actions while interacting with an experiment participant.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Core, Mark G.; Georgila, Kallirroi; Nye, Benjamin D.; Auerbach, Daniel; Liu, Zhi Fei; DiNinni, Richard
Learning, Adaptive Support, Student Traits, and Engagement in Scenario-Based Learning Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{core_learning_2016,
  title     = {Learning, Adaptive Support, Student Traits, and Engagement in Scenario-Based Learning},
  author    = {Mark G. Core and Kallirroi Georgila and Benjamin D. Nye and Daniel Auerbach and Zhi Fei Liu and Richard DiNinni},
  url       = {http://www.iitsecdocs.com/search},
  year      = {2016},
  date      = {2016-11-01},
  booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, FL},
  abstract  = {Scenario-based training systems pose an especially difficult challenge for an intelligent tutoring system (ITS). In addition to the basic problems of deciding when to intervene and what guidance to provide, the ITS must decide whether to give guidance directly (e.g., a hint message), indirectly through positive/negative results in the scenario, or to delay guidance until a post-scenario review session. There are a number of factors that an adaptive ITS should consider and we use self-report survey instruments to investigate the relationship between traits, learning strategies, expectations, learner behaviors derived from log files, post-use perceptions of the system, and pre-test and post-test results. We use the ELITE Lite Counseling training system as a testbed for our experiments. This system uses virtual role players to allow learners to practice leadership counseling skills, and is in use at the United States Military Academy (USMA). This paper analyzes two data sets. We collected data from local university students, a non-military population of roughly the same age as USMA Cadets using the system. For these local participants, we could administer surveys and pre-tests and post-tests, and collect log files recording clicks made while using ELITE Lite. The second data set comes from USMA itself but is limited to log files. In both populations, the ITS’s hints are effective at boosting scenario performance, and for the university students, the overall experience promoted learning, and survey results suggest that higher levels of organization in study habits may lead to greater learning with ELITE Lite. For the USMA Cadets, ELITE Lite is part of their Military Leadership course rather than an experiment, which could explain why we found higher scenario performance on average than the non-military population, and more use of the post-scenario review feature.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale; Szablowski, Evan; Gratch, Jonathan; Feng, Andrew; Huang, Tiffany; Boberg, Jill; Shapiro, Ari
The effect of operating a virtual doppleganger in a 3D simulation Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 167–174, ACM Press, Burlingame, CA, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{lucas_effect_2016,
  title     = {The effect of operating a virtual doppleganger in a {3D} simulation},
  author    = {Gale Lucas and Evan Szablowski and Jonathan Gratch and Andrew Feng and Tiffany Huang and Jill Boberg and Ari Shapiro},
  url       = {http://dl.acm.org/citation.cfm?doid=2994258.2994263},
  doi       = {10.1145/2994258.2994263},
  isbn      = {978-1-4503-4592-7},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the 9th International Conference on Motion in Games},
  pages     = {167--174},
  publisher = {ACM Press},
  address   = {Burlingame, CA},
  abstract  = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. Intuition suggests that, with these new capabilities to create avatars that look like their users, every player should have his or her own avatar to play video games or simulations. We explicitly test the impact of having one’s own avatar (vs. a yoked control avatar) in a simulation (i.e., maze running task with mines). We test the impact of avatar identity on both subjective (e.g., feeling connected and engaged, liking avatar’s appearance, feeling upset when avatar’s injured, enjoying the game) and behavioral variables (e.g., time to complete task, speed, number of mines triggered, riskiness of maze path chosen). Results indicate that having an avatar that looks like the user improves their subjective experience, but there is no significant effect on how users perform in the simulation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ryan, James; Swanson, Reid
Recognizing Coherent Narrative Blog Content Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, pp. 234–246, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-48278-1 978-3-319-48279-8.
@inproceedings{ryan_recognizing_2016,
  title     = {Recognizing Coherent Narrative Blog Content},
  author    = {James Ryan and Reid Swanson},
  url       = {http://link.springer.com/10.1007/978-3-319-48279-8_21},
  doi       = {10.1007/978-3-319-48279-8_21},
  isbn      = {978-3-319-48278-1 978-3-319-48279-8},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
  pages     = {234--246},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {Interactive storytelling applications have at their disposal massive numbers of human-authored stories, in the form of narrative weblog posts, from which story content could be harvested and repurposed. Such repurposing is currently inhibited, however, in that many blog narratives are not sufficiently coherent for use in these applications. In a narrative that is not coherent, the order of the events in the narrative is not clear given the text of the story. We present the results of a study exploring automatic methods for estimating the coherence of narrative blog posts. In the end, our simplest model—one that only considers the degree to which story text is capitalized and punctuated—vastly outperformed a baseline model and, curiously, a series of more sophisticated models. Future work may use this simple model as a baseline, or may use it along with the classifier that it extends to automatically extract large numbers of narrative blog posts from the web for purposes such as interactive storytelling.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Saito, Shunsuke; Li, Tianye; Li, Hao
Real-Time Facial Segmentation and Performance Capture from RGB Input Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016), pp. 244–261, Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46483-1 978-3-319-46484-8.
@inproceedings{saito_real-time_2016,
  title     = {Real-Time Facial Segmentation and Performance Capture from {RGB} Input},
  author    = {Shunsuke Saito and Tianye Li and Hao Li},
  url       = {https://link.springer.com/chapter/10.1007/978-3-319-46484-8_15},
  doi       = {10.1007/978-3-319-46484-8_15},
  isbn      = {978-3-319-46483-1 978-3-319-46484-8},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016)},
  pages     = {244--261},
  publisher = {Springer International Publishing},
  address   = {Amsterdam, The Netherlands},
  abstract  = {We introduce the concept of unconstrained real-time 3D facial performance capture through explicit semantic segmentation in the RGB input. To ensure robustness, cutting edge supervised learning approaches rely on large training datasets of face images captured in the wild. While impressive tracking quality has been demonstrated for faces that are largely visible, any occlusion due to hair, accessories, or hand-to-face gestures would result in significant visual artifacts and loss of tracking accuracy. The modeling of occlusions has been mostly avoided due to its immense space of appearance variability. To address this curse of high dimensionality, we perform tracking in unconstrained images assuming non-face regions can be fully masked out. Along with recent breakthroughs in deep learning, we demonstrate that pixel-level facial segmentation is possible in real-time by repurposing convolutional neural networks designed originally for general semantic segmentation. We develop an efficient architecture based on a two-stream deconvolution network with complementary characteristics, and introduce carefully designed training samples and data augmentation strategies for improved segmentation accuracy and robustness. We adopt a state-of-the-art regression-based facial tracking framework with segmented face images as training, and demonstrate accurate and uninterrupted facial performance capture in the presence of extreme occlusion and even side views. Furthermore, the resulting segmentation can be directly used to composite partial 3D face models on the input images and enable seamless facial manipulation tasks, such as virtual make-up or face replacement.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Ruizhe; Wei, Lingyu; Vouga, Etienne; Huang, Qixing; Ceylan, Duygu; Medioni, Gerard; Li, Hao
Capturing Dynamic Textured Surfaces of Moving Targets Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016 Spotlight Presentation), Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46477-0 978-3-319-46478-7.
@inproceedings{wang_capturing_2016,
  title     = {Capturing Dynamic Textured Surfaces of Moving Targets},
  author    = {Ruizhe Wang and Lingyu Wei and Etienne Vouga and Qixing Huang and Duygu Ceylan and Gerard Medioni and Hao Li},
  url       = {https://link.springer.com/chapter/10.1007/978-3-319-46478-7_17},
  doi       = {10.1007/978-3-319-46478-7_17},
  isbn      = {978-3-319-46477-0 978-3-319-46478-7},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016 Spotlight Presentation)},
  publisher = {Springer International Publishing},
  address   = {Amsterdam, The Netherlands},
  abstract  = {We present an end-to-end system for reconstructing complete watertight and textured models of moving subjects such as clothed humans and animals, using only three or four handheld sensors. The heart of our framework is a new pairwise registration algorithm that minimizes, using a particle swarm strategy, an alignment error metric based on mutual visibility and occlusion. We show that this algorithm reliably registers partial scans with as little as 15% overlap without requiring any initial correspondences, and outperforms alternative global registration algorithms. This registration algorithm allows us to reconstruct moving subjects from free-viewpoint video produced by consumer-grade sensors, without extensive sensor calibration, constrained capture volume, expensive arrays of cameras, or templates of the subject geometry.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, J. Adam; Krum, David M.; Bolas, Mark T.
Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments Journal Article
In: ACM Transactions on Applied Perception, vol. 14, no. 2, pp. 1–17, 2016, ISSN: 15443558.
@article{jones_vertical_2016,
  title     = {Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments},
  author    = {J. Adam Jones and David M. Krum and Mark T. Bolas},
  url       = {http://dl.acm.org/citation.cfm?id=2983631},
  doi       = {10.1145/2983631},
  issn      = {1544-3558},
  year      = {2016},
  date      = {2016-10-01},
  journal   = {ACM Transactions on Applied Perception},
  volume    = {14},
  number    = {2},
  pages     = {1--17},
  abstract  = {In this article, we detail a series of experiments that examines the effect of vertical field-of-view extension and the addition of non-specific peripheral visual stimulation on gait characteristics and distance judgments in a head-worn virtual environment. Specifically, we examined four field-of-view configurations: a common 60° diagonal field of view (48° × 40°), a 60° diagonal field of view with the addition of a luminous white frame in the far periphery, a field of view with an extended upper edge, and a field of view with an extended lower edge. We found that extension of the field of view, either with spatially congruent or spatially non-informative visuals, resulted in improved distance judgments and changes in observed posture. However, these effects were not equal across all field-of-view configurations, suggesting that some configurations may be more appropriate than others when balancing performance, cost, and ergonomics.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Smart Mobile Virtual Characters: Video Characters vs. Animated Characters Proceedings Article
In: Proceedings of the Fourth International Conference on Human Agent Interaction, pp. 371–374, ACM Press, Biopolis, Singapore, 2016, ISBN: 978-1-4503-4508-8.
@inproceedings{kang_smart_2016,
  title     = {Smart Mobile Virtual Characters: Video Characters vs. Animated Characters},
  author    = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
  url       = {http://dl.acm.org/citation.cfm?id=2980511},
  doi       = {10.1145/2974804.2980511},
  isbn      = {978-1-4503-4508-8},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the Fourth International Conference on Human Agent Interaction},
  pages     = {371--374},
  publisher = {ACM Press},
  address   = {Biopolis, Singapore},
  abstract  = {This study investigates presentation techniques for a chat-based virtual human that communicates engagingly with users via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated 3D virtual character as opposed to a real human video character capable of displaying backchannel behaviors. The findings of our study demonstrate that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Study comparing video-based characters and 3D-based characters on mobile devices for chat Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 181–186, ACM Press, Burlingame, California, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{kang_study_2016,
  title     = {Study comparing video-based characters and 3D-based characters on mobile devices for chat},
  author    = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
  url       = {http://dl.acm.org/citation.cfm?id=2994274},
  doi       = {10.1145/2994258.2994274},
  isbn      = {978-1-4503-4592-7},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the 9th International Conference on Motion in Games},
  pages     = {181--186},
  publisher = {ACM Press},
  address   = {Burlingame, California},
  abstract  = {This study explores presentation techniques for a chat-based virtual human that communicates engagingly with users. Interactions with the virtual human occur via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated virtual character as opposed to a real human video character capable of displaying realistic backchannel behaviors. An audio-only interface is compared additionally with the two types of characters. The findings of our study suggest that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits realistic backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Joshi, Himanshu; Rosenbloom, Paul S.; Ustun, Volkan
Continuous phone recognition in the Sigma cognitive architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 18, pp. 23–32, 2016, ISSN: 2212-683X.
@article{joshi_continuous_2016,
  title    = {Continuous phone recognition in the Sigma cognitive architecture},
  author   = {Himanshu Joshi and Paul S. Rosenbloom and Volkan Ustun},
  url      = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X16300652},
  doi      = {10.1016/j.bica.2016.09.001},
  issn     = {2212-683X},
  year     = {2016},
  date     = {2016-10-01},
  journal  = {Biologically Inspired Cognitive Architectures},
  volume   = {18},
  pages    = {23--32},
  abstract = {Spoken language processing is an important capability of human intelligence that has hitherto been unexplored by cognitive architectures. This reflects on both the symbolic and sub-symbolic nature of the speech problem, and the capabilities provided by cognitive architectures to model the latter and its rich interplay with the former. Sigma has been designed to leverage the state-of-the-art hybrid (discrete + continuous) mixed (symbolic + probabilistic) capability of graphical models to provide in a uniform non-modular fashion effective forms of, and integration across, both cognitive and sub-cognitive behavior. In this article, previous work on speaker dependent isolated word recognition has been extended to demonstrate Sigma’s feasibility to process a stream of fluent audio and recognize phones, in an online and incremental manner with speaker independence. Phone recognition is an important step in integrating spoken language processing into Sigma. This work also extends the acoustic front-end used in the previous work in service of speaker independence. All of the knowledge used in phone recognition was added supraarchitecturally – i.e. on top of the architecture – without requiring the addition of new mechanisms to the architecture.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Bernardet, Ulysses; Chollet, Mathieu; DiPaola, Steve; Scherer, Stefan
An Architecture for Biologically Grounded Real-Time Reflexive Behavior Book Section
In: Intelligent Virtual Agents, vol. 10011, pp. 295–305, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-47664-3 978-3-319-47665-0.
@incollection{bernardet_architecture_2016,
  title     = {An Architecture for Biologically Grounded Real-Time Reflexive Behavior},
  author    = {Ulysses Bernardet and Mathieu Chollet and Steve DiPaola and Stefan Scherer},
  url       = {http://link.springer.com/chapter/10.1007/978-3-319-47665-0_26},
  doi       = {10.1007/978-3-319-47665-0_26},
  isbn      = {978-3-319-47664-3 978-3-319-47665-0},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Intelligent Virtual Agents},
  volume    = {10011},
  pages     = {295--305},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {In this paper, we present a reflexive behavior architecture, that is geared towards the application in the control of the non-verbal behavior of the virtual humans in a public speaking training system. The model is organized along the distinction between behavior triggers that are internal (endogenous) to the agent, and those that origin in the environment (exogenous). The endogenous subsystem controls gaze behavior, triggers self-adaptors, and shifts between different postures, while the exogenous system controls the reaction towards auditory stimuli with different temporal and valence characteristics. We evaluate the different components empirically by letting participants compare the output of the proposed system to valid alternative variations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Valstar, Michel; Gratch, Jonathan; Schuller, Björn; Ringeval, Fabien; Lalanne, Denis; Torres, Mercedes Torres; Scherer, Stefan; Stratou, Giota; Cowie, Roddy; Pantic, Maja
AVEC 2016: Depression, Mood, and Emotion Recognition Workshop and Challenge Proceedings Article
In: Proceedings of the 6th International Workshop on Audio/Visual Emotion Challenge, pp. 3–10, ACM Press, Amsterdam, The Netherlands, 2016, ISBN: 978-1-4503-4516-3.
@inproceedings{valstar_avec_2016,
  title     = {AVEC 2016: Depression, Mood, and Emotion Recognition Workshop and Challenge},
  author    = {Michel Valstar and Jonathan Gratch and Björn Schuller and Fabien Ringeval and Denis Lalanne and Mercedes Torres Torres and Stefan Scherer and Giota Stratou and Roddy Cowie and Maja Pantic},
  url       = {http://dl.acm.org/citation.cfm?id=2988258},
  doi       = {10.1145/2988257.2988258},
  isbn      = {978-1-4503-4516-3},
  year      = {2016},
  date      = {2016-10-01},
  booktitle = {Proceedings of the 6th International Workshop on Audio/Visual Emotion Challenge},
  pages     = {3--10},
  publisher = {ACM Press},
  address   = {Amsterdam, The Netherlands},
  abstract  = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2016) "Depression, Mood and Emotion" will be the sixth competition event aimed at comparison of multimedia processing and machine learning methods for automatic audio, visual and physiological depression and emotion analysis, with all participants competing under strictly the same conditions. The goal of the Challenge is to provide a common benchmark test set for multi-modal information processing and to bring together the depression and emotion recognition communities, as well as the audio, video and physiological processing communities, to compare the relative merits of the various approaches to depression and emotion recognition under well-defined and strictly comparable conditions and establish to what extent fusion of the approaches is possible and beneficial. This paper presents the challenge guidelines, the common data used, and the performance of the baseline system on the two tasks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Carla; Tin, Jessica; Brown, Jeremy; Fritzsch, Elisabeth; Gabber, Shirley
Wochat Chatbot User Experience Summary Proceedings Article
In: Proceedings of the 2016 IVA: WOCHAT Workshop, Zerotype, Los Angeles, CA, 2016.
@inproceedings{gordon_wochat_2016,
  title     = {Wochat Chatbot User Experience Summary},
  author    = {Carla Gordon and Jessica Tin and Jeremy Brown and Elisabeth Fritzsch and Shirley Gabber},
  url       = {http://workshop.colips.org/wochat/documents/ST-281.pdf},
  year      = {2016},
  date      = {2016-09-01},
  booktitle = {Proceedings of the 2016 IVA: WOCHAT Workshop},
  publisher = {Zerotype},
  address   = {Los Angeles, CA},
  abstract  = {A team of 5 interns at the USC Institute for Creative Technologies interacted with 5 of the 6 chatbots; IRIS, Sammy, Sarah, TickTock and Joker. Unfortunately no one in our team could get the 6th chatbot, pyEliza, working. We found that there were certainly some chatbots that were better than others, and some of us were surprised by how distinct each bot felt from the others. One member commented on how they felt as though each different chatbot had an individual “voice” so to speak. Others were surprised by just how much of a “personality” the bots seemed to have. Most members of our team cited IRIS as their favorite, in terms of being capable of producing naturalistic conversation, with Sammy taking a close second. However, only one member of the team was able to interact with Sarah and TickTock, but that member cited TickTock as a capable conversation partner, and Sarah as being the best bot on a number of measures including appropriateness of responses and overall conversation cohesiveness. Therefore, perhaps if more members had been able to interact with Sarah and TickTock they may have ranked higher. Lastly, Joker was by far our least favorite, with whom no member of our team was able to have anything resembling a naturalistic or even cohesive conversation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2006
Mao, Wenji; Gratch, Jonathan
Evaluating a Computational Model of Social Causality and Responsibility Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Hakodate, Japan, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_evaluating_2006,
  title     = {Evaluating a Computational Model of Social Causality and Responsibility},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20Computational%20Model%20of%20Social%20Causality%20and%20Responsibility.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Hakodate, Japan},
  abstract  = {Intelligent agents are typically situated in a social environment and must reason about social cause and effect. Such reasoning is qualitatively different from physical causal reasoning that underlies most intelligent systems. Modeling social causal reasoning can enrich the capabilities of multi-agent systems and intelligent user interfaces. In this paper, we empirically evaluate a computational model of social causality and responsibility against human social judgments. Results from our experimental studies show that in general, the model's predictions of internal variables and inference process are consistent with human responses, though they also suggest some possible refinement to the computational model.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Soo-Min; Hovy, Eduard
Identifying and Analyzing Judgment Opinions Proceedings Article
In: Proceedings of the Human Language Technology/North American Association of Computational Linguistics Conference, New York, NY, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{kim_identifying_2006,
  title     = {Identifying and Analyzing Judgment Opinions},
  author    = {Soo-Min Kim and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/Identifying%20and%20Analyzing%20Judgment%20Opinions.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the Human Language Technology/North American Association of Computational Linguistics Conference},
  address   = {New York, NY},
  abstract  = {In this paper, we introduce a methodology for analyzing judgment opinions. We define a judgment opinion as consisting of a valence, a holder, and a topic. We decompose the task of opinion analysis into four parts: 1) recognizing the opinion; 2) identifying the valence; 3) identifying the holder; and 4) identifying the topic. In this paper, we address the first three parts and evaluate our methodology using both intrinsic and extrinsic measures},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
McAlinden, Ryan; Clevenger, William
A Culturally-enhanced Environmental Framework for Virtual Environments Proceedings Article
In: Proceedings of Behavior Representation in Modeling and Simulation, Baltimore, MD, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{mcalinden_culturally-enhanced_2006,
  title     = {A Culturally-enhanced Environmental Framework for Virtual Environments},
  author    = {Ryan McAlinden and William Clevenger},
  url       = {http://ict.usc.edu/pubs/A%20Culturally-enhanced%20Environmental%20Framework%20for%20Virtual%20Environments.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of Behavior Representation in Modeling and Simulation},
  address   = {Baltimore, MD},
  abstract  = {This paper details the design and implementation of an embedded environmental framework that introduces cultural and social influences into a simulation agent's decision-making process. We describe the current limitations associated with accurately representing culture in virtual environments and military simulations, and how recent research in other academic fields have enabled computational techniques to begin incorporating the effects of culture into AI and behavior subsystems. The technical approach is presented that describes the design and implementation of a hierarchical data model, as well as the software techniques for embedding culturally-specific information inside of a virtual environment. Finally, future work is discussed for developing a more comprehensive and standardized approach for embedding this culturally-specific information inside of the virtual domain.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Braaten, Alyssa J.; Parsons, Thomas D.; McCue, Robert; Sellers, Alfred; Burns, William J.
Neurocognitive Differential Diagnosis of Dementing Diseases: Alzheimer's Dementia, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder Journal Article
In: International Journal of Neuroscience, vol. 116, pp. 1271–1293, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{braaten_neurocognitive_2006,
  title    = {Neurocognitive Differential Diagnosis of Dementing Diseases: Alzheimer's Dementia, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder},
  author   = {Alyssa J. Braaten and Thomas D. Parsons and Robert McCue and Alfred Sellers and William J. Burns},
  url      = {http://ict.usc.edu/pubs/NEUROCOGNITIVE%20DIFFERENTIAL%20DIAGNOSIS%20OF%20DEMENTING%20DISEASES-%20ALZHEIMER%E2%80%99S%20DEMENTIA,%20VASCULAR%20DEMENTIA,%20FRONTOTEMPORAL%20DEMENTIA,%20AND%20MAJOR%20DEPRESSIVE%20DISORDER.pdf},
  year     = {2006},
  date     = {2006-01-01},
  journal  = {International Journal of Neuroscience},
  volume   = {116},
  pages    = {1271--1293},
  abstract = {Similarities in presentation of Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder, pose differential diagnosis challenges. The current study identifies specific neuropsychological patterns of scores for Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder. Neuropsychological domains directly assessed in the study included: immediate memory, delayed memory, confrontational naming, verbal fluency, attention, concentration, and executive functioning. The results reveal specific neuropsychological comparative profiles for Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder. The identification of these profiles will assist in the differential diagnosis of these disorders and aid in patient treatment.},
  keywords = {MedVR},
  pubstate = {published},
  tppubtype = {article}
}
Lane, H. Chad; Core, Mark; Gomboc, Dave; Solomon, Steve; Lent, Michael; Rosenberg, Milton
Reflective Tutoring for Immersive Simulation Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Tutoring Systems, Jhongli, Taiwan, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_reflective_2006,
  title     = {Reflective Tutoring for Immersive Simulation},
  author    = {H. Chad Lane and Mark Core and Dave Gomboc and Steve Solomon and Michael Lent and Milton Rosenberg},
  url       = {http://ict.usc.edu/pubs/Reflective%20Tutoring%20for%20Immersive%20Simulation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 8th International Conference on Intelligent Tutoring Systems},
  address   = {Jhongli, Taiwan},
  abstract  = {Reflection is critically important for time-constrained training simulations that do not permit extensive tutor-student interactions during an exercise. Here, we describe a reflective tutoring system for a virtual human simulation of negotiation. The tutor helps students review their exercise, elicits where and how they could have done better, and uses explainable artificial intelligence (XAI) to allow students the chance to ask questions about the virtual human's behavior.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; Pair, Jarrell; Reger; Treskunov, Anton; Parsons, Thomas D.
User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder Proceedings Article
In: Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology, Esbjerg, Denmark, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_user-centered_2006,
  title     = {User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder},
  author    = {Albert Rizzo and Ken Graap and Jarrell Pair and Reger and Anton Treskunov and Thomas D. Parsons},
  url       = {http://ict.usc.edu/pubs/User-centered%20design%20driven%20development%20of%20a%20virtual%20reality%20therapy%20application%20for%20Iraq%20war%20combat-related%20post%20traumatic%20stress%20disorder.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology},
  address   = {Esbjerg, Denmark},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Buxbaum, Laurel J.; Palermo, Maryann; Mastrogiovanni, Dina; Read, Mary Schmidt; Rosenberg-Pitonyak, Ellen; Rizzo, Albert; Coslett, H. Branch
Assessment of Spatial Neglect with a Virtual Wheelchair Navigation Task Proceedings Article
In: 5th Annual International Workshop on Virtual Rehabilitation, New York, NY, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{buxbaum_assessment_2006,
  title     = {Assessment of Spatial Neglect with a Virtual Wheelchair Navigation Task},
  author    = {Laurel J. Buxbaum and Maryann Palermo and Dina Mastrogiovanni and Mary Schmidt Read and Ellen Rosenberg-Pitonyak and Albert Rizzo and H. Branch Coslett},
  url       = {http://ict.usc.edu/pubs/Assessment%20of%20Spatial%20Neglect%20with%20a%20Virtual%20Wheelchair%20Navigation%20Task.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Annual International Workshop on Virtual Rehabilitation},
  address   = {New York, NY},
  abstract  = {We report data from 9 participants with right hemisphere stroke on a new virtual reality (VR) wheelchair navigation test designed to assess lateralized spatial attention and neglect. The test consists of a virtual winding path along which participants must navigate (or be navigated by an experimenter) as they name objects encountered along the way. There are 4 VR task conditions, obtained by crossing the factors array complexity (Simple, Complex) and Driver (Participant, Experimenter). Participants performed the VR task, a real-life wheelchair navigation task, and a battery of tests assessing arousal, visual attention under secondary task demands, and neglect. The VR test showed sensitivity to both array complexity and driver, with best performance occurring in the Experimenter Navigated, Simple Array condition. The VR test also showed high correlations with the wheelchair navigation test, and these correlations were in many instances higher than those between traditional clinical neglect tests and the wheelchair navigation task. Moreover, the VR test detected lateralized attention deficits in participants whose performance was within the normal range on other neglect tests. We conclude that the VR task is sensitive to factors likely to affect the severity of neglect in the daily environment, and shows promise as an efficient, easily administered measure of real-life wheelchair navigation.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Traum, David
Improving Question-Answering With Linking Dialogues Proceedings Article
In: International Conference on Intelligent User Interfaces (IUI-2006), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_improving_2006,
  title     = {Improving Question-Answering With Linking Dialogues},
  author    = {Sudeep Gandhe and Andrew S. Gordon and David Traum},
  url       = {http://ict.usc.edu/pubs/Improving%20Question-Answering%20With%20Linking%20Dialogues%20.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {International Conference on Intelligent User Interfaces (IUI-2006)},
  address   = {Sydney, Australia},
  abstract  = {Question-answering dialogue systems have found many applications in interactive learning environments. This paper is concerned with one such application for Army leadership training, where trainees input free-text questions that elicit pre-recorded video responses. Since these responses are already crafted before the question is asked, a certain degree of incoherence exists between the question that is asked and the answer that is given. This paper explores the use of short linking dialogues that stand in between the question and its video response to alleviate the problem of incoherence. We describe a set of experiments with human generated linking dialogues that demonstrate their added value. We then describe our implementation of an automated method for utilizing linking dialogues and show that these have better coherence properties than the original system without linking dialogues.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Mao, Wenji
Towards a Validated Model of "Emotional Intelligence" Proceedings Article
In: Proceedings of the 21st National Conference on Artificial Intelligence, pp. 1613–1616, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2006,
  title     = {Towards a Validated Model of "Emotional Intelligence"},
  author    = {Jonathan Gratch and Stacy C. Marsella and Wenji Mao},
  url       = {http://ict.usc.edu/pubs/Towards%20a%20Validated%20Model%20of%20Emotional%20Intelligence.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 21st National Conference on Artificial Intelligence},
  volume    = {2},
  pages     = {1613--1616},
  address   = {Boston, MA},
  abstract  = {This article summarizes recent progress in developing a validated computational account of the cognitive antecedents and consequences of emotion. We describe the potential of this work to impact a variety of AI problem domains.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tröster, Alexander I.; Parsons, Thomas D.
Sodium Amytal Testing and Language Journal Article
In: Encyclopedia of Language and Linguistics, vol. 11, pp. 500–503, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{troster_sodium_2006,
  title    = {Sodium Amytal Testing and Language},
  author   = {Alexander I. Tröster and Thomas D. Parsons},
  url      = {http://ict.usc.edu/pubs/Sodium%20Amytal%20Testing%20and%20Language.pdf},
  year     = {2006},
  date     = {2006-01-01},
  journal  = {Encyclopedia of Language and Linguistics},
  volume   = {11},
  pages    = {500--503},
  abstract = {The intracarotid amobarbital test (IAT) was first described by Juhn Wada and thus is often referred to as the 'Wada test.' Wada originally developed this technique to study the interhemispheric spread of epileptiform discharges in patients undergoing unilateral electroconvulsive therapy. Based on his observation that an expressive aphasia resulted when the language dominant hemisphere was injected with amobarbital, he reasoned that this technique might be useful in determining hemispheric language dominance in neurosurgical candidates (and thus minimize speech and language dysfunction in patients undergoing dominant hemisphere surgery).},
  keywords = {MedVR},
  pubstate = {published},
  tppubtype = {article}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2006, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{tariq_efficient_2006,
  title       = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting},
  author      = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
  url         = {http://ict.usc.edu/pubs/ICT-TR-01-2006.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 01 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present an image-based technique to rapidly ac- quire spatially varying subsurface reflectance prop- erties of a human face. The estimated properties can be used directly to render faces with spatially vary- ing scattering, or can be used to estimate a robust average across the face. We demonstrate our tech- nique with renderings of peoples' faces under novel, spatially-varying illumination and provide compar- isons with current techniques. Our captured data consists of images of the face from a single view- point under two small sets of projected images. The first set, a sequence of phase shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set contains structured light and is used to obtain face geometry. We match the observed reflectance pro- files to scattering properties predicted by a scatter- ing model using a lookup table. From these prop- erties we can generate images of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface trans- port, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsur- face scattering information, and makes it possible for the first time to capture these properties over an entire face.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Nelson, Nathaniel W.; Parsons, Thomas D.; Grote, Christopher L.; Smith, Clifford A.; Sisung, James R. II
The MMPI-2 Fake Bad Scale: Concordance and Specificity of True and Estimated Scores Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 28, pp. 1–12, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{nelson_mmpi-2_2006,
  title    = {The MMPI-2 Fake Bad Scale: Concordance and Specificity of True and Estimated Scores},
  author   = {Nathaniel W. Nelson and Thomas D. Parsons and Christopher L. Grote and Clifford A. Smith and James R. Sisung II},
  url      = {http://ict.usc.edu/pubs/The%20MMPI-2%20Fake%20Bad%20Scale-%20Concordance%20and%20Specificity%20of%20True%20and%20Estimated%20Scores.pdf},
  doi      = {10.1080/13803390490919272},
  issn     = {1380-3395},
  year     = {2006},
  date     = {2006-01-01},
  journal  = {Journal of Clinical and Experimental Neuropsychology},
  volume   = {28},
  pages    = {1--12},
  abstract = {A number of recent studies have supported the use of the MMPI-2 Fake Bad Scale (FBS) as a measure of negative response bias, the scale at times demonstrating greater sensitivity to negative response bias than other MMPI-2 validity scales. However, clinicians may not always have access to True FBS (T-FBS) scores, such as when True-False answer sheets are unavailable or published research studies do not report FBS raw scores. Under these conditions, Larrabee (2003a) suggests a linear regression formula that provides estimated FBS (E-FBS) scores derived from weighted validity and clinical T-Scores. The present study intended to validate this regression formula of MMPI-2 E-FBS scores and demonstrate its specificity in a sample of non-litigating, clinically referred, medically intractable epilepsy patients. We predicted that the E-FBS scores would correlate highly (>.70) with the T-FBS scores, that the E-FBS would show comparable correlations with MMPI-2 validity and clinical scales relative to the T-FBS, and that the E-FBS would show an adequate ability to match T-FBS scores using a variety of previously suggested T-FBS raw score cutoffs. Overall, E-FBS scores correlated very highly with T-FBS scores (r = .78, p < .0001), though correlations were especially high for women (r = .85, p < .0001) compared to men (r = .62, p < .001). Thirty-one of 32 (96.9%) comparisons made between E-FBS/T-FBS correlates with other MMPI-2 scales were nonsignificant. When matching to T-FBS "high" and "low" scores, the E-FBS scores demonstrated the highest hit rate (92.5%) through use of Lees-Haley's (1992) revised cutoffs for men and women. These same cutoffs resulted in excellent overall specificity for both the T-FBS scores (92.5%) and E-FBS scores (90.6%). The authors conclude that the E-FBS represents an adequate estimate of T-FBS scores in the current epilepsy sample. Use of E-FBS scores may be especially useful when clinicians conduct the MMPI-2 short form, which does not include all of the 43 FBS items but does include enough items to compute each of the validity and clinical T-Scores. Future studies should examine E-FBS sensitivity in compensation-seekers with incomplete effort.},
  keywords = {MedVR},
  pubstate = {published},
  tppubtype = {article}
}
Robertson, R. Kevin; Parsons, Thomas D.; Sidtis, John J.; Inman, Tina Hanlon; Robertson, Wendy T.; Hall, Colin D.; Price, Richard W.
Timed Gait Test: Normative Data for the Assessment of the AIDS Dementia Complex Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 28, pp. 1053–1064, 2006, ISSN: 1380-3395.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_timed_2006,
  title     = {Timed Gait Test: Normative Data for the Assessment of the {AIDS} Dementia Complex},
  author    = {R. Kevin Robertson and Thomas D. Parsons and John J. Sidtis and Tina Hanlon Inman and Wendy T. Robertson and Colin D. Hall and Richard W. Price},
  url       = {http://ict.usc.edu/pubs/Timed%20Gait%20Test-%20Normative%20Data%20for%20the%20Assessment%20of%20the%20AIDS%20Dementia%20Complex.pdf},
  doi       = {10.1080/13803390500205684},
  issn      = {1380-3395},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Journal of Clinical and Experimental Neuropsychology},
  volume    = {28},
  pages     = {1053--1064},
  abstract  = {The Timed Gait test is a standardized procedure assessing motor dysfunction of lower extremities and gait abnormalities associated with AIDS dementia complex. Heretofore, interpretations of Timed Gait results have been hampered by the lack of normative data. We provide results on this test derived from 1,549 subjects (HIV-seronegatives (HIV-) and seropositives (HIV+) classified according to ADC stage). Timed Gait was found to be a useful screening and assessment tool for evaluating ADC and correlated with clinical ADC staging as well as more extensive structured neurological and neuropsychological evaluations. Analysis of covariance results (with age and education as covariates) revealed symptomatic HIV+(SX) and AIDS groups having significantly slower Timed Gait scores than those in the HIV– and asymptomatic HIV+(ASX) groups. The SX group obtained significantly slower timed gait scores than those in the AIDS group. There was a significant increase in Timed Gait scores with each increase in dementia staging with the HIV- subjects having the fastest mean Timed Gait scores and the HIV+ dementia stage 2+ having the slowest. These normative data should prove useful in both recognition of ADC and treatment response. Given its minimal training requirements, the Timed Gait would have utility in resource limited settings.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Peers, Pieter; Hawkins, Tim; Debevec, Paul
A Reflective Light Stage Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2006, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{peers_reflective_2006,
  title       = {A Reflective Light Stage},
  author      = {Pieter Peers and Tim Hawkins and Paul Debevec},
  url         = {http://ict.usc.edu/pubs/ICT-TR-04.2006.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 04 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present a novel acquisition device to capture high resolution 4D reflectance fields of real scenes. The device consists of a concave hemispherical surface coated with a rough specular paint and a digital video projector with a fish-eye lens positioned near the center of the hemisphere. The scene is placed near the projector, also near the center, and photographed from a fixed vantage point. The projector projects a high-resolution image of incident illumination which is reflected by the rough hemispherical surface to become the illumination on the scene. We demonstrate the utility of this device by capturing a high resolution hemispherical reflectance field of a specular object which would be difficult to capture using previous acquisition techniques.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Werf, R. J.
Creating Rapport with Virtual Humans Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2006, 2006.
Abstract | Links | BibTeX | Tags:
@techreport{van_der_werf_creating_2006,
  title       = {Creating Rapport with Virtual Humans},
  author      = {van der Werf, R. J.},
  url         = {http://ict.usc.edu/pubs/ICT-TR.02.2006-Rick.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 02 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {This report describes the internship about the assignment Creating Rapport with Virtual Humans. The assignment is split up into two separate parts. The first part is to improve the visual feature detection of the current mimicking system [MAA04]. This is going to be done using a Computer Vision approach. Together with two other interns [LAM05] the whole mimicking system was improved, leading to a new Rapport system. The second part involves subject testing with the newly developed system. Firstly the goal is to make a working system that can be reused and expanded in the future. Secondly the goal is to use the data from the subject test to determine if rapport can be created with Virtual Humans. The resulting Rapport system should be a very well reuseable and expandable system. This system makes it possible for other people, unfamiliar with the system, to easily use the system for future testing. Unfortunately too little data was obtained with subject testing to give a solid conclusion whether or not creating rapport with Virtual Humans is possible. The subject testing did lead to a improved testing procedure which makes future testing quite easy.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Parsons, Thomas D.; Kratz, K. M.; Thompson, E.; Stanczyk, F. Z.; Buckwalter, John Galen
DHEA Supplementation and Cognition in Postmenopausal Women Journal Article
In: International Journal of Neuroscience, vol. 16, pp. 141–155, 2006, ISSN: 0020-7454.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_dhea_2006,
  title     = {{DHEA} Supplementation and Cognition in Postmenopausal Women},
  author    = {Thomas D. Parsons and K. M. Kratz and E. Thompson and F. Z. Stanczyk and John Galen Buckwalter},
  url       = {http://ict.usc.edu/pubs/DHEA%20Supplementation%20and%20Cognition%20in%20Postmenopausal%20Women.pdf},
  doi       = {10.1080/00207450500341506},
  issn      = {0020-7454},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {International Journal of Neuroscience},
  volume    = {16},
  pages     = {141--155},
  abstract  = {Previous work has suggested that DHEA supplementation may have adverse cognitive effects in elderly women. This article analyzed 24-h measurements of DHEA, DHEAS, and cortisol to determine if cognitive decrease with treatment is mediated by DHEA’s impact on endogenous cortisol. It was found that DHEA administration increased cortisol at several hours during the day. In the treatment group, cortisol was positively associated with cognition at study completion. An increase in negative associations between DHEA(S) levels and cognition was found at completion. Increased cortisol does not explain the cognitive deficits associated with DHEA, suggesting a direct negative effect of exogenous DHEA on cognition.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Roque, Antonio; Ai, Hua; Traum, David
Evaluation of an Information State-Based Dialogue Manager Proceedings Article
In: Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue, Potsdam, Germany, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_evaluation_2006,
  author    = {Antonio Roque and Hua Ai and David Traum},
  title     = {Evaluation of an Information State-Based Dialogue Manager},
  booktitle = {Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue},
  address   = {Potsdam, Germany},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Information%20State-Based%20Dialogue%20Manager.pdf},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {We describe an evaluation of an information state-based dialogue manager by measuring its accuracy in information state component updating.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Dillenbourg, Pierre; Traum, David
Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving Journal Article
In: The Journal of the Learning Sciences, vol. 15, no. 1, pp. 121–151, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{dillenbourg_sharing_2006,
  title     = {Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving},
  author    = {Pierre Dillenbourg and David Traum},
  url       = {http://ict.usc.edu/pubs/Sharing%20Solutions-%20Persistence%20and%20Grounding%20in%20Multimodal%20Collaborative%20Problem%20Solving.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {The Journal of the Learning Sciences},
  volume    = {15},
  number    = {1},
  pages     = {121--151},
  abstract  = {This article reports on an exploratory study of the relationship between grounding and problem solving in multimodal computer-mediated collaboration. This article examines two different media, a shared whiteboard and a MOO environment that includes a text chat facility. A study was done on how the acknowledgment rate (how often partners give feedback of having perceived, understood, and accepted partner's contributions) varies according to the media and the content of interactions. It was expected that the whiteboard would serve to draw schemata that disambiguate chat utterances. Instead, results show that the whiteboard is primarily used to represent the state of problem solving and the chat is used for grounding information created on the whiteboard. These results are interpreted in terms of persistence: More persistent information is exchanged through the more persistent medium. The whiteboard was used as a shared memory rather than a grounding tool.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Lindheim, Richard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation Book Section
In: Multimodal Intelligent Information Presentation, vol. 27, pp. 305–321, Springer, Netherlands, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{swartout_simulation_2006,
  title     = {Simulation Meets {Hollywood}: Integrating Graphics, Sound, Story and Character for Immersive Simulation},
  author    = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Richard Lindheim and Stacy C. Marsella and Jeff Rickel and David Traum},
  url       = {http://ict.usc.edu/pubs/SIMULATION%20MEETS%20HOLLYWOOD-%20Integrating%20Graphics,%20Sound,%20Story%20and%20Character%20for%20Immersive%20Simulation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Multimodal Intelligent Information Presentation},
  volume    = {27},
  pages     = {305--321},
  publisher = {Springer},
  address   = {Netherlands},
  abstract  = {The Institute for Creative Technologies was created at the University of Southern California with the goal of bringing together researchers in simulation technology to collaborate with people from the entertainment industry. The idea was that much more compelling simulations could be developed if researchers who understood state-of-the-art simulation technology worked together with writers and directors who knew how to create compelling stories and characters. This paper presents our first major effort to realize that vision, the Mission Rehearsal Exercise Project, which confronts a soldier trainee with the kinds of dilemmas he might reasonably encounter in a peacekeeping operation. The trainee is immersed in a synthetic world and interacts with virtual humans: artificially intelligent and graphically embodied conversational agents that understand and generate natural language, reason about world events and respond appropriately to the trainee's actions or commands. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have also joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. In this paper we describe the Mission Rehearsal Exercise system and the insights gained through this large-scale integration.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Rosenbloom, Paul
A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond Journal Article
In: Tutorials in Quantitative Methods for Psychology, vol. 2, no. 2, pp. 43–51, 2006.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Virtual Humans
@article{rosenbloom_cognitive_2006,
  title     = {A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond},
  author    = {Paul Rosenbloom},
  url       = {http://ict.usc.edu/pubs/A%20Cognitive%20Odyssey-%20From%20the%20Power%20Law%20of%20Practice%20to%20a%20General%20Learning%20Mechanism%20and%20Beyond.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Tutorials in Quantitative Methods for Psychology},
  volume    = {2},
  number    = {2},
  pages     = {43--51},
  abstract  = {This article traces a line of research that began with the establishment of a pervasive regularity in human performance – the Power Law of Practice – and proceeded through several decades' worth of investigations that this opened up into learning and cognitive architecture. The results touch on both cognitive psychology and artificial intelligence, and more specifically on the possibility of building general learning mechanisms/systems. It is a story whose final chapter is still to be written.},
  keywords  = {CogArch, Cognitive Architecture, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Feintuch, Uri; Liat, Raz; Hwang, Jane; Josman, Naomi; Katz, Noomi; Kizony, Rachel; Rand, Debbie; Rizzo, Albert; Shahar, Meir; Yongseok, Jang; Weiss, Patrice L. (Tamar)
Integrating haptic-tactile feedback into a video capture based VE for rehabilitation Journal Article
In: CyberPsychology and Behavior, vol. 9, no. 2, pp. 129–132, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{feintuch_integrating_2006,
  title     = {Integrating haptic-tactile feedback into a video capture based {VE} for rehabilitation},
  author    = {Uri Feintuch and Raz Liat and Jane Hwang and Naomi Josman and Noomi Katz and Rachel Kizony and Debbie Rand and Albert Rizzo and Meir Shahar and Jang Yongseok and Patrice L. (Tamar) Weiss},
  url       = {http://ict.usc.edu/pubs/Integrating%20Haptic-Tactile%20Feedback%20into%20a%20Video-Capture%E2%80%93Based%20Virtual%20Environment%20for%20Rehabilitation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {CyberPsychology and Behavior},
  volume    = {9},
  number    = {2},
  pages     = {129--132},
  abstract  = {Video-capture virtual reality (VR) systems are gaining popularity as intervention tools. To date, these platforms offer visual and audio feedback but do not provide haptic feedback. We contend that adding haptic feedback may enhance the quality of intervention for various theoretical and empirical reasons. This study aims to integrate haptic-tactile feedback into a video capture system (GX VR), which is currently applied for rehabilitation. The proposed multi-modal system can deliver audio-visual as well as vibrotactile feedback. The latter is provided via small vibratory discs attached to the patient's limbs. This paper describes the system, the guidelines of its design, and the ongoing usability study.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Robertson, R. Kevin; Parsons, Thomas D.; Horst, Charles; Hall, Colin D.
Thoughts of death and suicidal ideation in nonpsychiatric human immunodeficiency virus seropositive individuals Journal Article
In: Death Studies, vol. 30, pp. 455–469, 2006, ISSN: 0748-1187.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_thoughts_2006,
  title     = {Thoughts of death and suicidal ideation in nonpsychiatric human immunodeficiency virus seropositive individuals},
  author    = {R. Kevin Robertson and Thomas D. Parsons and Charles Horst and Colin D. Hall},
  url       = {http://ict.usc.edu/pubs/THOUGHTS%20OF%20DEATH%20AND%20SUICIDAL%20IDEATION%20IN%20NONPSYCHIATRIC%20HUMAN%20IMMUNODEFICIENCY%20VIRUS%20SEROPOSITIVE%20INDIVIDUALS.pdf},
  doi       = {10.1080/07481180600614435},
  issn      = {0748-1187},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Death Studies},
  volume    = {30},
  pages     = {455--469},
  abstract  = {The present study examines the prevalence of death thoughts and suicidality in HIV infection. Subjects (n = 246) were examined for psychiatric morbidity and suicidality. Compared to high risk HIV seronegatives, HIV seropositives (HIV+) had significantly increased frequency and severity of both suicidal ideation and death thoughts. Two-thirds of seropositives had suicidal ideation at some point; half of the seropositives reported suicide plans and one quarter suicide attempts; and a third of seropositives reported current suicidal ideation. Suicidal ideation did not increase with advancing disease. The high prevalence of suicidal ideation suggests inclusion of its assessment in HIV treatment regardless of stage.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Woods, Steven Paul; Rippeth, Julie D.; Conover, Emily; Carey, Catherine L.; Parsons, Thomas D.; Tröster, Alexander I.
Statistical Power of Studies Examining the Cognitive Effects of Subthalamic Nucleus Deep Brain Stimulation in Parkinson's Disease Journal Article
In: The Clinical Neuropsychologist, vol. 20, pp. 27–38, 2006, ISSN: 1385-4046.
Abstract | Links | BibTeX | Tags: MedVR
@article{woods_statistical_2006,
  title     = {Statistical Power of Studies Examining the Cognitive Effects of Subthalamic Nucleus Deep Brain Stimulation in {Parkinson's} Disease},
  author    = {Steven Paul Woods and Julie D. Rippeth and Emily Conover and Catherine L. Carey and Thomas D. Parsons and Alexander I. Tröster},
  url       = {http://ict.usc.edu/pubs/STATISTICAL%20POWER%20OF%20STUDIES%20EXAMINING%20THE%20COGNITIVE%20EFFECTS%20OF%20SUBTHALAMIC%20NUCLEUS%20DEEP%20BRAIN%20STIMULATION%20IN%20PARKINSON%E2%80%99S%20DISEASE.pdf},
  doi       = {10.1080/13854040500203290},
  issn      = {1385-4046},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {The Clinical Neuropsychologist},
  volume    = {20},
  pages     = {27--38},
  abstract  = {It has been argued that neuropsychological studies generally possess adequate statistical power to detect large effect sizes. However, low statistical power is problematic in neuropsychological research involving clinical populations and novel interventions for which available sample sizes are often limited. One notable example of this problem is evident in the literature regarding the cognitive sequelae of deep brain stimulation (DBS) of the subthalamic nucleus (STN) in persons with Parkinson's disease (PD). In the current review, a post hoc estimate of the statistical power of 30 studies examining cognitive effects of STN DBS in PD revealed adequate power to detect substantial cognitive declines (i.e., very large effect sizes), but surprisingly low estimated power to detect cognitive changes associated with conventionally small, medium, and large effect sizes. Such wide spread Type II error risk in the STN DBS cognitive outcomes literature may affect the clinical decision-making process as concerns the possible risk of postsurgical cognitive morbidity, as well as conceptual inferences to be drawn regarding the role of the STN in higher-level cognitive functions. Statistical and methodological recommendations (e.g., meta-analysis) are offered to enhance the power of current and future studies examining the neuropsychological sequelae of STN DBS in PD.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Martinovski, Bilyana
Framework for analysis of mitigation in courts Journal Article
In: Journal of Pragmatics, 2006.
Abstract | Links | BibTeX | Tags:
@article{martinovski_framework_2006,
  author    = {Bilyana Martinovski},
  title     = {Framework for analysis of mitigation in courts},
  journal   = {Journal of Pragmatics},
  url       = {http://ict.usc.edu/pubs/Framework%20for%20analysis%20of%20mitigation%20in%20courts.pdf},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {This paper presents an activity-based framework for empirical discourse analysis of mitigation in public environments such as Swedish and Bulgarian courtroom examinations. Mitigation is defined as a pragmatic, cognitive and linguistic behavior the main purpose of which is reduction of vulnerability. The suggested framework consists of mitigation processes, which involve mitigating argumentation lines, defense moves, and communicative acts. The functions of mitigation are described in terms of the participants' actions and goals separately from politeness strategies. The conclusions and observations address two things: issues related to the pragmatic theory of communication especially mitigation and issues related to the trial as a social activity. For instance, non-turn-taking confirmations by examiners are often followed by volunteered utterances, which in some cases may be examples of 'rehearsed' testimonies. At the same time the witnesses' tendency to volunteer information even on the behalf of their own credibility indicates that they also favor pro-party testimonies. Despite the objective judicial role of the prosecutor or judge and/or despite the examiners accommodating style the verbal behavior of the witnesses exhibits constant anticipation of danger.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Leuski, Anton; Pair, Jarrell; Traum, David; McNerney, Peter J.; Georgiou, Panayiotis G.; Patel, Ronakkumar
How to Talk to a Hologram Proceedings Article
In: Proceedings of the 11th International Conference on Intelligent User Interfaces, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_how_2006,
  author    = {Anton Leuski and Jarrell Pair and David Traum and Peter J. McNerney and Panayiotis G. Georgiou and Ronakkumar Patel},
  title     = {How to Talk to a Hologram},
  booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
  address   = {Sydney, Australia},
  url       = {http://ict.usc.edu/pubs/How%20to%20Talk%20to%20a%20Hologram.pdf},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {There is a growing need for creating life-like virtual human simulations that can conduct a natural spoken dialog with a human student on a predefined subject. We present an overview of a spoken-dialog system that supports a person interacting with a full-size hologram-like virtual human character in an exhibition kiosk settings. We also give a brief summary of the natural language classification component of the system and describe the experiments we conducted with the system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Braaten, Alyssa J.; Hall, Colin D.; Robertson, R. Kevin
Better quality of life with neuropsychological improvement on HAART Journal Article
In: Health and Quality of Life Outcomes, vol. 4, no. 11, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_better_2006,
  title     = {Better quality of life with neuropsychological improvement on {HAART}},
  author    = {Thomas D. Parsons and Alyssa J. Braaten and Colin D. Hall and R. Kevin Robertson},
  url       = {http://ict.usc.edu/pubs/Better%20quality%20of%20life%20with%20neuropsychological%20improvement%20on%20HAART.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Health and Quality of Life Outcomes},
  volume    = {4},
  number    = {11},
  abstract  = {Background: Successful highly active antiretroviral therapy (HAART) regimens have resulted in substantial improvements in the systemic health of HIV infected persons and increased survival times. Despite increased systemic health, the prevalence of minor HIV-associated cognitive impairment appears to be rising with increased longevity, and it remains to be seen what functional outcomes will result from these improvements. Cognitive impairment can dramatically impact functional ability and day-to-day productivity. We assessed the relationship of quality of life (QOL) and neuropsychological functioning with successful HAART treatment. Methods: In a prospective longitudinal study, subjects were evaluated before instituting HAART (naïve) or before changing HAART regimens because current therapy failed to maintain suppression of plasma viral load (treatment failure). Subjects underwent detailed neuropsychological and neurological examinations, as well as psychological evaluation sensitive to possible confounds. Re-evaluation was performed six months after institution of the new HAART regimen and/or if plasma viral load indicated treatment failure. At each evaluation, subjects underwent ultrasensitive HIV RNA quantitative evaluation in both plasma and cerebrospinal fluid. Results: HAART successes performed better than failures on measures exploring speed of mental processing (p {\textless} .02). HAART failure was significantly associated with increased self-reports of physical health complaints (p {\textless} .01) and substance abuse (p {\textless} .01). An interesting trend emerged, in which HAART failures endorsed greater levels of psychological and cognitive complaints (p = .06). Analysis between neuropsychological measures and QOL scores revealed significant correlation between QOL Total and processing speed (p {\textless} .05), as well as flexibility (p {\textless} .05). Conclusion: Our study investigated the relationship between HIV-associated neurocognitive impairment and quality of life. HAART failures experienced slower psychomotor processing, and had increased self-reports of physical health complaints and substance abuse. Contrariwise, HAART successes experienced improved mental processing, demonstrating the impact of successful treatment on functioning. With increasing life expectancy for those who are HIV seropositive, it is important to measure cognitive functioning in relation to the actual QOL these individuals report. The study results have implications for the optimal management of HIV-infected persons. Specific support or intervention may be beneficial for those who have failed HAART in order to decrease substance abuse and increase overall physical health.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Mao, Wenji; Marsella, Stacy C.
Modeling Social Emotions and Social Attributions Book Section
In: Sun, R. (Ed.): Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation, Cambridge University Press, 2006.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_modeling_2006,
  author    = {Jonathan Gratch and Wenji Mao and Stacy C. Marsella},
  editor    = {R. Sun},
  title     = {Modeling Social Emotions and Social Attributions},
  booktitle = {Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation},
  publisher = {Cambridge University Press},
  url       = {http://ict.usc.edu/pubs/Modeling%20Social%20Emotions%20and%20Social%20Attributions.pdf},
  year      = {2006},
  date      = {2006-01-01},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gold, Jeffrey I.; Kim, Seok Hyeon; Kant, Alexis J.; Joseph, Michael H.; Rizzo, Albert
Effectiveness of Virtual Reality for Pediatric Pain Distraction during IV Placement Journal Article
In: CyberPsychology and Behavior, vol. 9, no. 2, pp. 207–212, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{gold_effectiveness_2006,
  title     = {Effectiveness of Virtual Reality for Pediatric Pain Distraction during {IV} Placement},
  author    = {Jeffrey I. Gold and Seok Hyeon Kim and Alexis J. Kant and Michael H. Joseph and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Effectiveness%20of%20Virtual%20Reality%20for%20Pediatric%20Pain%20Distraction%20during%20IV%20Placement.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {CyberPsychology and Behavior},
  volume    = {9},
  number    = {2},
  pages     = {207--212},
  abstract  = {The objective of this study was to test the efficacy and suitability of virtual reality (VR) as a pain distraction for pediatric intravenous (IV) placement. Twenty children (12 boys, 8 girls) requiring IV placement for a magnetic resonance imaging/computed tomography (MRI/CT) scan were randomly assigned to two conditions: (1) VR distraction using Street Luge (5DT), presented via a head-mounted display, or (2) standard of care (topical anesthetic) with no distraction. Children, their parents, and nurses completed self-report questionnaires that assessed numerous health-related outcomes. Responses from the Faces Pain Scale–Revised indicated a fourfold increase in affective pain within the control condition; by contrast, no significant differences were detected within the VR condition. Significant associations between multiple measures of anticipatory anxiety, affective pain, IV pain intensity, and measures of past procedural pain provided support for the complex interplay of a multimodal assessment of pain perception. There was also a sufficient amount of evidence supporting the efficacy of Street Luge as a pediatric pain distraction tool during IV placement: an adequate level of presence, no simulator sickness, and significantly more child-, parent-, and nurse-reported satisfaction with pain management. VR pain distraction was positively endorsed by all reporters and is a promising tool for decreasing pain, and anxiety in children undergoing acute medical interventions. However, further research with larger sample sizes and other routine medical procedures is warranted.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Tortell, Rebecca; Morie, Jacquelyn
Videogame play and the effectiveness of virtual environments for training Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{tortell_videogame_2006,
  author    = {Rebecca Tortell and Jacquelyn Morie},
  title     = {Videogame play and the effectiveness of virtual environments for training},
  booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
  url       = {http://ict.usc.edu/pubs/Videogame%20play%20and%20the%20effectiveness%20of%20virtual%20environments%20for%20training.pdf},
  year      = {2006},
  date      = {2006-01-01},
  abstract  = {The Sensory Environments Evaluation (SEE) project set out to examine the effects of emotional valence of a virtual training scenario on learning and memory. Emotional arousal is well-established as having enhancing effects on memory (McGaugh, 2000). A virtual scenario called DarkCon was created to resemble a night-time reconnaissance mission. Priming of subjects was the first experimental variable. Subjects were randomly assigned to receive their mission briefing in a serious style, suggesting a serious military mission, or in a lighter style, suggesting a fun roleplaying game. The influence of videogame experience was included in analysis of subjects' recall of the environment and of their physiology. In the present study, 34 Army Rangers from Fort Benning, GA underwent the DarkCon mission. Significant effects of priming condition and videogame play were discovered in subjects' recollection of the mission, and in their physiological reactions to highly exciting material. This paper is primarily concerned with the effects of videogame play frequency on subjects' behavior, recall, and physiology. The effects of priming will be cursorily discussed here as they relate to videogame play habits, and explored in more detail on their own in future publications. Directions for future research into the effects of videogame play experience on training are discussed.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2005
Cantzos, Demetrios; Kyriakakis, Chris
Quality Enhancement of Low Bit Rate MPEG1-Layer 3 Audio Based on Audio Resynthesis Proceedings Article
In: Proceedings of the 119th Audio Engineering Society Convention, New York, NY, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{cantzos_quality_2005,
title = {Quality Enhancement of Low Bit Rate MPEG1-Layer 3 Audio Based on Audio Resynthesis},
author = {Demetrios Cantzos and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Quality%20Enhancement%20of%20Low%20Bit%20Rate%20MPEG1-Layer%203%20Audio%20Based%20on%20Audio%20Resynthesis.pdf},
year = {2005},
date = {2005-10-01},
booktitle = {Proceedings of the 119th Audio Engineering Society Convention},
address = {New York, NY},
abstract = {One of the most popular audio compression formats is indisputably the MPEG1-Layer 3 format which is based on the idea of low-bit transparent encoding. As these types of audio signals are starting to migrate from portable players with inexpensive headphones to higher quality home audio systems, it is becoming evident that higher bit rates may be required to maintain transparency. We propose a novel method that enhances low bit rate MP3 encoded audio segments by applying multichannel audio resynthesis methods in a post-processing stage or during decoding. Our algorithm employs the highly efficient Generalized Gaussian mixture model which, combined with cepstral smoothing, leads to very low cepstral reconstruction errors. In addition, residual conversion is applied which proves to significantly improve the enhancement performance. The method presented can be easily generalized to include other audio formats for which sound quality is an issue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Ganesan, Kavita
Automated Story Capture From Conversational Speech Proceedings Article
In: 3rd International Conference on Knowledge Capture (K-CAP 05), Banff, Alberta, Canada, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_automated_2005,
title = {Automated Story Capture From Conversational Speech},
author = {Andrew S. Gordon and Kavita Ganesan},
url = {http://ict.usc.edu/pubs/Automated%20Story%20Capture%20From%20Conversational%20Speech.pdf},
year = {2005},
date = {2005-10-01},
booktitle = {3rd International Conference on Knowledge Capture (K-CAP 05)},
address = {Banff, Alberta, Canada},
abstract = {While storytelling has long been recognized as an important part of effective knowledge management in organizations, knowledge management technologies have generally not distinguished between stories and other types of discourse. In this paper we describe a new type of technological support for storytelling that involves automatically capturing the stories that people tell to each other in conversations. We describe our first attempt at constructing an automated story extraction system using statistical text classification and a simple voting scheme. We evaluate the performance of this system and demonstrate that useful levels of precision and recall can be obtained when analyzing transcripts of interviews, but that performance on speech recognition data is not above what can be expected by chance. This paper establishes the level of performance that can be obtained using a straightforward approach to story extraction, and outlines ways in which future systems can improve on these results and enable a wide range of knowledge socialization applications.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.; Kenny, Patrick G.; Hovy, Eduard; Narayanan, Shrikanth; Fast, Edward; Martinovski, Bilyana; Baghat, Rahul; Robinson, Susan; Marshall, Andrew; Wang, Dagen; Gandhe, Sudeep; Leuski, Anton
Dealing with Doctors: A Virtual Human for Non-team Interaction Proceedings Article
In: 6th SIGdial Conference on Discourse and Dialogue, Lisbon, Portugal, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_dealing_2005,
title = {Dealing with Doctors: A Virtual Human for Non-team Interaction},
author = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella and Patrick G. Kenny and Eduard Hovy and Shrikanth Narayanan and Edward Fast and Bilyana Martinovski and Rahul Baghat and Susan Robinson and Andrew Marshall and Dagen Wang and Sudeep Gandhe and Anton Leuski},
url = {http://ict.usc.edu/pubs/Dealing%20with%20Doctors.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {6th SIGdial Conference on Discourse and Dialogue},
address = {Lisbon, Portugal},
abstract = {We present a virtual human doctor who can engage in multi-modal negotiation dialogue with people from other organizations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan
Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_fight_2005,
title = {Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis},
author = {David Traum and William Swartout and Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Fight,%20Flight,%20or%20Negotiate-%20Believable%20Strategies%20for%20Conversing%20under%20Crisis.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {5th International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {This paper describes a model of conversation strategies implemented in virtual humans designed to help people learn negotiation skills. We motivate and discuss these strategies and their use to allow a virtual human to engage in complex adversarial negotiation with a human trainee. Choice of strategy depends on both the personality of the agent and assessment of the likelihood that the negotiation can be beneficial. Execution of strategies can be performed by choosing specific dialogue behaviors such as whether and how to respond to a proposal. Current assessment of the value of the topic, the utility of the strategy, and affiliation toward the other conversants can be used to dynamically change strategies throughout the course of a conversation. Examples will be given from the SASO-ST project, in which a trainee learns to negotiate by interacting with virtual humans who employ these strategies.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Iyer, Kumar; Luigi, Donat-Pierre; Williams, Josh; Dozois, Aimee; Rizzo, Albert
Development of a Data Management Tool for Investigating Multivariate Space and Free Will Experiences Journal Article
In: Applied Psychophysiology and Biofeedback, vol. 30, no. 3, pp. 319–331, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Worlds
@article{morie_development_2005,
title = {Development of a Data Management Tool for Investigating Multivariate Space and Free Will Experiences},
author = {Jacquelyn Morie and Kumar Iyer and Donat-Pierre Luigi and Josh Williams and Aimee Dozois and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Development%20of%20a%20Data%20Management%20Tool%20for%20Investigating%20Multivariate%20Space%20and%20Free%20Will%20Experiences%20in%20Virtual%20Reality.pdf},
year = {2005},
date = {2005-09-01},
journal = {Applied Psychophysiology and Biofeedback},
volume = {30},
number = {3},
pages = {319–331},
abstract = {While achieving realism has been a main goal in making convincing virtual reality (VR) environments, just what constitutes realism is still a question situated firmly in the research domain. VR has become mature enough to be used in therapeutic applications such as clinical exposure therapy with some success. We now need detailed scientific investigations to better understand why VR works for these types of cases, and how it could work for other key applications such as training. Just as in real life, it appears that the factors will be complex and multi-variate, and this plethoric situation presents exceptional challenges to the VR researcher. We would not want to lessen VR’s ability to replicate real world conditions in order to more easily study it, however, for by doing so we may compromise the very qualities that comprise its effectiveness. What is really needed are more robust tools to instrument, organize, and visualize the complex data generated by measurements of participant experiences in a realistic virtual world. We describe here our first study in an ongoing program of effective virtual environment research, the types of data we are dealing with, and a specific tool we have been compelled to create that allows us some measure of control over this data. We call this tool Phloem, after the botanical channels that plants use to transport, support and store nutrients.},
keywords = {MedVR, Virtual Worlds},
pubstate = {published},
tppubtype = {article}
}
Debevec, Paul
Capturing and Simulating Physically Accurate Illumination in Computer Graphics Proceedings Article
In: 11th Annual Symposium on Frontiers of Engineering, Niskayuna, NY, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_capturing_2005,
title = {Capturing and Simulating Physically Accurate Illumination in Computer Graphics},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Capturing%20and%20Simulating%20Physically%20Accurate%20Illumination%20in%20Computer%20Graphics.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {11th Annual Symposium on Frontiers of Engineering},
address = {Niskayuna, NY},
abstract = {Anyone who has seen a recent summer blockbuster has witnessed the dramatic increases in computer-generated realism in recent years. Visual effects supervisors now report that bringing even the most challenging visions of film directors to the screen is no longer a question of what's possible; with today's techniques it is only a matter of time and cost. Driving this increase in realism have been computer graphics (CG) techniques for simulating how light travels within a scene and for simulating how light reflects off of and through surfaces. These techniques—some developed recently, and some originating in the 1980s—are being applied to the visual effects process by computer graphics artists who have found ways to channel the power of these new tools.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Kallman, Marcelo; Marsella, Stacy C.
Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans Proceedings Article
In: International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kallman_hierarchical_2005,
title = {Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans},
author = {Marcelo Kallman and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Hierarchical%20Motion%20Controllers%20for%20Real-Time%20Autonomous%20Virtual%20Humans.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {Continuous and synchronized whole-body motions are essential for achieving believable autonomous virtual humans in interactive applications. We present a new motion control architecture based on generic controllers that can be hierarchically interconnected and reused in real-time. The hierarchical organization implies that leaf controllers are motion generators while the other nodes are connectors, performing operations such as interpolation, blending, and precise scheduling of children controllers. We also describe how the system can correctly handle the synchronization of gestures with speech in order to achieve believable conversational characters. For that purpose, different types of controllers implement a generic model of the different phases of a gesture.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kwon, Soon-il; Narayanan, Shrikanth
Unsupervised Speaker Indexing Using Generic Models Journal Article
In: IEEE Transactions on Speech and Audio Processing, vol. 13, no. 5, pp. 1004–1013, 2005.
Abstract | Links | BibTeX | Tags:
@article{kwon_unsupervised_2005,
title = {Unsupervised Speaker Indexing Using Generic Models},
author = {Soon-il Kwon and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Unsupervised%20Speaker%20Indexing%20Using%20Generic%20Models.pdf},
year = {2005},
date = {2005-09-01},
journal = {IEEE Transactions on Speech and Audio Processing},
volume = {13},
number = {5},
pages = {1004–1013},
abstract = {Unsupervised speaker indexing sequentially detects points where a speaker identity changes in a multispeaker audio stream, and categorizes each speaker segment, without any prior knowledge about the speakers. This paper addresses two challenges: The first relates to sequential speaker change detection. The second relates to speaker modeling in light of the fact that the number/identity of the speakers is unknown. To address this issue, a predetermined generic speaker-independent model set, called the sample speaker models (SSM), is proposed. This set can be useful for more accurate speaker modeling and clustering without requiring training models on target speaker data. Once a speaker-independent model is selected from the generic sample models, it is progressively adapted into a specific speaker-dependent model. Experiments were performed with data from the Speaker Recognition Benchmark NIST Speech corpus (1999) and the HUB-4 Broadcast News Evaluation English Test material (1999). Results showed that our new technique, sampled using the Markov Chain Monte Carlo method, gave 92.5% indexing accuracy on two speaker telephone conversations, 89.6% on four-speaker conversations with the telephone speech quality, and 87.2% on broadcast news. The SSMs outperformed the universal background model by up to 29.4% and the universal gender models by up to 22.5% in indexing accuracy in the experiments of this paper.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Proceedings Article
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_performance_2005,
title = {Performance Geometry Capture for Spatially Varying Relighting},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {SIGGRAPH 2005 Sketch},
address = {Los Angeles, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Sanjit; Chu, Anson; Cohen, Jonathan; Pighin, Frédéric
Fluid Simulation Via Disjoint Translating Grids Proceedings Article
In: Special Interest Group - Graphics Technical Sketch, Los Angeles, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{patel_fluid_2005,
title = {Fluid Simulation Via Disjoint Translating Grids},
author = {Sanjit Patel and Anson Chu and Jonathan Cohen and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Fluid%20Simulation%20Via%20Disjoint%20Translating%20Grids.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {Special Interest Group - Graphics Technical Sketch},
address = {Los Angeles, CA},
abstract = {We present an adaptive fluid simulation technique that splits the computation domain in multiple moving grids. Using this technique, we are able to simulate fluids over large spatial domains with reasonable computation times.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
A Median Cut Algorithm for Light Probe Sampling Proceedings Article
In: SIGGRAPH (Special Interest Group - Graphics), Los Angeles, CA, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_median_2005,
title = {A Median Cut Algorithm for Light Probe Sampling},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Median%20Cut%20Algorithm%20for%20Light%20Probe%20Sampling.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {SIGGRAPH (Special Interest Group - Graphics)},
address = {Los Angeles, CA},
abstract = {We present a technique for approximating a light probe image as a constellation of light sources based on a median cut algorithm. The algorithm is efficient, simple to implement, and can realistically represent a complex lighting environment with as few as 64 point light sources.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh; Dozois, Aimee; Luigi, Donat-Pierre
The Fidelity of "Feel": Emotional Affordance in Virtual Environments Proceedings Article
In: 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_fidelity_2005,
title = {The Fidelity of "Feel": Emotional Affordance in Virtual Environments},
author = {Jacquelyn Morie and Josh Williams and Aimee Dozois and Donat-Pierre Luigi},
url = {http://ict.usc.edu/pubs/The%20Fidelity%20of%20Feel-%20Emotional%20Affordance%20in%20Virtual%20Environments.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {11th International Conference on Human-Computer Interaction},
address = {Las Vegas, NV},
abstract = {Virtual environments (VEs) should be able to provide experiences as rich and complex as those to be had in real life. While this seems obvious, it is not yet possible to create a perfect simulacrum of the real world, so such correspondence requires the development of design techniques by which VEs can be made to appear more real. It also requires evaluation studies to determine if such techniques produce the desired results. As emotions are implicated in our phenomenological understanding of the physical world, they should also play an integral role in the experience of the virtual one. Therefore, a logical sequence of experimentation to understand how VEs can be made to function as emotion-induction systems is in order. The Sensory Environments Evaluation (SEE) research program has developed a twofold design process to explore if we react to virtually supplied stimuli as we do to the real world equivalents. We look at manipulating both the sensory and emotional aspects of not only the environment but also the participant. We do this with the focus on what emotional affordances this manipulation will provide. Our first evaluation scenario, DarkCon, was designed in this way to produce a strong sense of presence. Sixty-four subjects have been fielded to date and the data is currently being analyzed for results. We hope to find that rich design techniques along with the frame of mind with which a VR experience is approached will predictably influence perception and behavior within a virtual world. We will use these results to inform continuing research into the creation of more emotionally affective VEs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark; Lent, Michael; Solomon, Steve; Gomboc, Dave
Explainable Artificial Intelligence for Training and Tutoring Proceedings Article
In: 12th International Conference on Artificial Intelligence in Education, Amsterdam, The Netherlands, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_explainable_2005,
title = {Explainable Artificial Intelligence for Training and Tutoring},
author = {H. Chad Lane and Mark Core and Michael Lent and Steve Solomon and Dave Gomboc},
url = {http://ict.usc.edu/pubs/Explainable%20Artificial%20Intelligence%20for%20Training%20and%20Tutoring.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {12th International Conference on Artificial Intelligence in Education},
address = {Amsterdam, The Netherlands},
abstract = {This paper describes an Explainable Artificial Intelligence (XAI) tool that allows entities to answer questions about their activities within a tactical simulation. We show how XAI can be used to provide more meaningful after-action reviews and discuss ongoing work to integrate an intelligent tutor into the XAI framework.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_virtual_2005,
title = {Virtual Humans for non-team interaction training},
author = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
address = {Utrecht, Netherlands},
abstract = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_evaluating_2005,
title = {Evaluating a computational model of emotion},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
year = {2005},
date = {2005-07-01},
journal = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
volume = {11},
number = {1},
pages = {23–43},
abstract = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Riedl, Mark O.; Lane, H. Chad; Hill, Randall W.; Swartout, William
Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture Proceedings Article
In: AI and Education 2005 Workshop on Narrative Learning Environments, Amsterdam, The Netherlands, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{riedl_automated_2005,
title = {Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture},
author = {Mark O. Riedl and H. Chad Lane and Randall W. Hill and William Swartout},
url = {http://ict.usc.edu/pubs/Automated%20Story%20Direction%20and%20Intelligent%20Tutoring-%20Towards%20a%20Unifying%20Architecture.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {AI and Education 2005 Workshop on Narrative Learning Environments},
address = {Amsterdam, The Netherlands},
abstract = {Recently, interactive storytelling systems — systems that allow a user to make decisions that can potentially impact the direction of a narrative — have been applied to training and education. Interactive storytelling systems often rely on an automated story director to manage the user's experience. The focus of an automated director is the emergence of a narrative-like experience for the user. In contrast, intelligent tutors traditionally address the acquisition or strengthening of a learner's knowledge. Our goal is to build training simulations that cultivate compelling storylines while simultaneously maintaining a pedagogical presence by incorporating both automated story direction and intelligent tutoring into an immersive environment. But what is the relationship between an automated director and an intelligent tutor? In this paper, we discuss the similarities and differences of automated story directors and intelligent tutors and, based on our analysis, recommend an architecture for building narrative-based training simulations that utilize both effectively and without conflict.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Kim, Gerard J.; Yeh, Shih-Ching; Thiebaux, Marcus; Hwang, Jayne; Buckwalter, John Galen
Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{rizzo_development_2005,
title = {Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods},
author = {Albert Rizzo and Gerard J. Kim and Shih-Ching Yeh and Marcus Thiebaux and Jayne Hwang and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Development%20of%20a%20Benchmarking%20Scenario%20for%20Testing%203D%20User%20Interface%20Devices%20and%20Interaction%20Methods.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
address = {Las Vegas, NV},
abstract = {To address a part of the challenge of testing and comparing various 3D user interface devices and methods, we are currently developing and testing a VR 3D User Interface benchmarking scenario. The approach outlined in this paper focuses on the capture of human interaction performance on object selection and manipulation tasks using standardized and scalable block configurations that allow for measurement of speed and efficiency with any interaction device or method. The block configurations that we are using as benchmarking stimuli are accompanied by a pure mental rotation visuospatial assessment test. This feature will allow researchers to test users' existing spatial abilities and statistically parcel out the variability due to innate ability, from the actual hands-on performance metrics. This statistical approach could lead to a more pure analysis of the ergonomic features of interaction devices and methods separate from existing user abilities. An initial test was conducted at two sites using this benchmarking system to make comparisons between 3D/gesture-based and 2D/mouse-based interactions for 3D selection and manipulation. Our preliminary results demonstrated, as expected, that the 3D/gesture based method in general outperformed the 2D/mouse interface. As well there were statistically significant performance differences between different user groups when categorized by their sex, visuospatial ability and educational background.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Commonsense Psychology and the Functional Requirements of Cognitive Models Proceedings Article
In: American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence, AAAI Press, Pittsburgh, PA, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_commonsense_2005,
title = {Commonsense Psychology and the Functional Requirements of Cognitive Models},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Commonsense%20Psychology%20and%20the%20Functional%20Requirements%20of%20Cognitive%20Models.pdf},
year = {2005},
date = {2005-07-01},
booktitle = {American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence},
publisher = {AAAI Press},
address = {Pittsburgh, PA},
abstract = {In this paper we argue that previous models of cognitive abilities (e.g. memory, analogy) have been constructed to satisfy functional requirements of implicit commonsense psychological theories held by researchers and nonresearchers alike. Rather than working to avoid the influence of commonsense psychology in cognitive modeling research, we propose to capitalize on progress in developing formal theories of commonsense psychology to explicitly define the functional requirements of cognitive models. We present a taxonomy of 16 classes of cognitive models that correspond to the representational areas that have been addressed in large-scale inferential theories of commonsense psychology. We consider the functional requirements that can be derived from inferential theories for one of these classes, the processes involved in human memory. We argue that the breadth coverage of commonsense theories can be used to better evaluate the explanatory scope of cognitive models, as well as facilitate the investigation of larger-scale cognitive systems.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ettaile_transonics_2005,
title = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
author = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2005},
date = {2005-06-01},
booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
pages = {89–92},
address = {Ann Arbor, MI},
abstract = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctorpatient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Liao, Wei-Kai; Cohen, Isaac
Classifying Facial Gestures in Presence of Head Motion Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{liao_classifying_2005,
title = {Classifying Facial Gestures in Presence of Head Motion},
author = {Wei-Kai Liao and Isaac Cohen},
url = {http://ict.usc.edu/pubs/Classifying%20Facial%20Gestures%20in%20Presence%20of%20Head%20Motion.pdf},
year = {2005},
date = {2005-06-01},
booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
address = {San Diego, CA},
abstract = {This paper addresses the problem of automatic facial gestures recognition in an interactive environment. Automatic facial gestures recognition is a difficult problem in computer vision, and most of the work has focused on inferring facial gestures in the context of a static head. In the paper we address the challenging problem of recognizing the facial expressions of a moving head. We present a systematic framework to analyze and classify the facial gestures with the head movement. Our system includes a 3D head pose estimation method to recover the global head motion. After estimating the head pose, the human face is modeled by a collection of face's regions. These regions represent the face model used for locating and extracting temporal facial features. We propose using a locally affine motion model to represent extracted motion fields. The classification consists of a graphical model for robustly representing the dependencies of the selected facial regions and the support vector machine. Our experiments show that this approach could classify human expressions in interactive environments accurately.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chu, Chi-Wei; Cohen, Isaac
Posture and Gesture Recognition using 3D Body Shapes Decomposition Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{chu_posture_2005,
title = {Posture and Gesture Recognition using 3D Body Shapes Decomposition},
author = {Chi-Wei Chu and Isaac Cohen},
url = {http://ict.usc.edu/pubs/Posture%20and%20Gesture%20Recognition%20using%203D%20Body%20Shapes%20Decomposition.pdf},
year = {2005},
date = {2005-06-01},
booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
address = {San Diego, CA},
abstract = {We present a method for describing arbitrary human posture as a combination of basic postures. This decomposition allows for recognition of a larger number of postures and gestures from a small set of elementary postures called atoms. We propose a modified version of the matching pursuit algorithm for decomposing an arbitrary input posture into a linear combination of primary and secondary atoms. These atoms are represented through their shape descriptor inferred from the 3D visual-hull of the human body posture. Using an atom-based description of postures increases tremendously the set of recognizable postures while reducing the required training data set. A gesture recognition system based on the atom decomposition and Hidden Markov Model (HMM) is also described. Instead of representing gestures as HMM transition of postures, we separate the description of gestures as two HMMs, each describing the transition of Primary/Secondary atoms; thus greatly reducing the size of state space of HMM. We illustrate the proposed approach for posture and gesture recognition method on a set of video streams captured by four synchronous cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}