Publications
Pestian, John P.; Sorter, Michael; Connolly, Brian; Cohen, Kevin Bretonnel; McCullumsmith, Cheryl; Gee, Jeffry T.; Morency, Louis-Philippe; Scherer, Stefan; Rohlfs, Lesley
A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial Journal Article
In: Suicide and Life-Threatening Behavior, 2016, ISSN: 03630234.
@article{pestian_machine_2016,
title = {A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial},
author = {John P. Pestian and Michael Sorter and Brian Connolly and Kevin Bretonnel Cohen and Cheryl McCullumsmith and Jeffry T. Gee and Louis-Philippe Morency and Stefan Scherer and Lesley Rohlfs},
url = {http://doi.wiley.com/10.1111/sltb.12312},
doi = {10.1111/sltb.12312},
issn = {03630234},
year = {2016},
date = {2016-11-01},
journal = {Suicide and Life-Threatening Behavior},
abstract = {Death by suicide demonstrates profound personal suffering and societal failure. While basic sciences provide the opportunity to understand biological markers related to suicide, computer science provides opportunities to understand suicide thought markers. In this novel prospective, multimodal, multicenter, mixed demographic study, we used machine learning to measure and fuse two classes of suicidal thought markers: verbal and nonverbal. Machine learning algorithms were used with the subjects’ words and vocal characteristics to classify 379 subjects recruited from two academic medical centers and a rural community hospital into one of three groups: suicidal, mentally ill but not suicidal, or controls. By combining linguistic and acoustic characteristics, subjects could be classified into one of the three groups with up to 85% accuracy. The results provide insight into how advanced technology can be used for suicide assessment and prevention.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
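For readers unfamiliar with the feature-level fusion the abstract above describes, the following is a minimal illustrative sketch in Python. The feature matrices, dimensions, and classifier choice are placeholder assumptions for illustration, not the study's actual pipeline.

```python
# Minimal sketch (not the authors' pipeline): fuse linguistic and acoustic
# feature vectors and classify subjects into three groups, reporting
# cross-validated accuracy. Feature extraction itself is assumed to be done.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(0)
n_subjects = 379
X_linguistic = rng.normal(size=(n_subjects, 50))   # e.g., word-based features (placeholder)
X_acoustic = rng.normal(size=(n_subjects, 30))     # e.g., vocal characteristics (placeholder)
y = rng.integers(0, 3, size=n_subjects)            # suicidal / mentally ill / control

# Early (feature-level) fusion: concatenate the two modalities.
X_fused = np.hstack([X_linguistic, X_acoustic])

clf = make_pipeline(StandardScaler(), SVC(kernel="linear"))
scores = cross_val_score(clf, X_fused, y, cv=5)
print("mean CV accuracy:", scores.mean())
```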
Neubauer, Catherine; Woolley, Joshua; Khooshabeh, Peter; Scherer, Stefan
Getting to know you: a multimodal investigation of team behavior and resilience to stress Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 193–200, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{neubauer_getting_2016,
title = {Getting to know you: a multimodal investigation of team behavior and resilience to stress},
author = {Catherine Neubauer and Joshua Woolley and Peter Khooshabeh and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993195},
doi = {10.1145/2993148.2993195},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {193–200},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Team cohesion has been suggested to be a critical factor in emotional resilience following periods of stress. Team cohesion may depend on several factors including emotional state, communication among team members and even psychophysiological response. The present study sought to employ several multimodal techniques designed to investigate team behavior as a means of understanding resilience to stress. We recruited 40 subjects to perform a cooperative-task in gender-matched, two-person teams. They were responsible for working together to meet a common goal, which was to successfully disarm a simulated bomb. This high-workload task requires successful cooperation and communication among members. We assessed several behaviors that relate to facial expression, word choice and physiological responses (i.e., heart rate variability) within this scenario. A manipulation of an “ice breaker” condition was used to induce a level of comfort or familiarity within the team prior to the task. We found that individuals in the “ice breaker” condition exhibited better resilience to subjective stress following the task. These individuals also exhibited more insight and cognitive speech, more positive facial expressions and were also able to better regulate their emotional expression during the task, compared to the control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Prendinger, Helmut; Scherer, Stefan
Native vs. Non-native Language Fluency Implications on Multimodal Interaction for Interpersonal Skills Training Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 386–393, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{chollet_native_2016,
title = {Native vs. Non-native Language Fluency Implications on Multimodal Interaction for Interpersonal Skills Training},
author = {Mathieu Chollet and Helmut Prendinger and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993196},
doi = {10.1145/2993148.2993196},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {386–393},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {New technological developments in the field of multimodal interaction show great promise for the improvement and assessment of public speaking skills. However, it is unclear how the experience of non-native speakers interacting with such technologies differs from native speakers. In particular, non-native speakers could benefit less from training with multimodal systems compared to native speakers. Additionally, machine learning models trained for the automatic assessment of public speaking ability on data of native speakers might not be performing well for assessing the performance of non-native speakers. In this paper, we investigate two aspects related to the performance and evaluation of multimodal interaction technologies designed for the improvement and assessment of public speaking between a population of English native speakers and a population of non-native English speakers. Firstly, we compare the experiences and training outcomes of these two populations interacting with a virtual audience system designed for training public speaking ability, collecting a dataset of public speaking presentations in the process. Secondly, using this dataset, we build regression models for predicting public speaking performance on both populations and evaluate these models, both on the population they were trained on and on how they generalize to the second population.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olszewski, Kyle; Lim, Joseph J.; Saito, Shunsuke; Li, Hao
High-fidelity facial and speech animation for VR HMDs Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 6, pp. 1–14, 2016, ISSN: 07300301.
@article{olszewski_high-fidelity_2016,
title = {High-fidelity facial and speech animation for VR HMDs},
author = {Kyle Olszewski and Joseph J. Lim and Shunsuke Saito and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=2980179.2980252},
doi = {10.1145/2980179.2980252},
issn = {07300301},
year = {2016},
date = {2016-11-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {6},
pages = {1–14},
abstract = {Several significant challenges currently prohibit expressive interaction in virtual reality (VR). The occlusion introduced by modern head-mounted displays (HMDs) makes most existing techniques for facial tracking intractable in this scenario. Furthermore, even state-of-the-art techniques used for real-time facial tracking in less constrained environments fail to capture subtle details of the user’s facial expressions that are essential for compelling speech animation. We introduce a novel system for HMD users to control a digital avatar in real-time while producing plausible speech animation and emotional expressions. Using a monocular camera attached to the front of an HMD, we record video sequences from multiple subjects performing a variety of facial expressions and speaking several phonetically-balanced sentences. These images are used with artist-generated animation data corresponding to these sequences to train a convolutional neural network (CNN) to regress images of a user’s mouth region to the parameters that control a digital avatar. To make training this system more tractable, we make use of audio-based alignment techniques to map images of multiple users making the same utterance to the corresponding animation parameters. We demonstrate that our regression technique is also feasible for tracking the expressions around the user’s eye region, including the eyebrows, with an infrared (IR) camera within the HMD, thereby enabling full facial tracking. This system requires no user-specific calibration, makes use of easily obtainable consumer hardware, and produces high-quality animations of both speech and emotional expressions. Finally, we demonstrate the quality of our system on a variety of subjects and evaluate its performance against state-of-the-art real-time facial tracking techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
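As a rough illustration of the image-to-parameter regression idea named in the abstract above, here is a minimal sketch in Python (PyTorch). The network shape, input size, and parameter count are placeholder assumptions, not the paper's architecture.

```python
# Minimal sketch (assumptions, not the paper's architecture): a small CNN that
# regresses a grayscale mouth-region crop to a vector of animation parameters,
# trained with an L2 loss against artist-authored targets.
import torch
import torch.nn as nn

class MouthToParams(nn.Module):
    def __init__(self, n_params=30):          # n_params is a placeholder choice
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.regressor = nn.Linear(64, n_params)

    def forward(self, x):                      # x: (batch, 1, H, W) mouth crops
        h = self.features(x).flatten(1)
        return self.regressor(h)

model = MouthToParams()
images = torch.randn(8, 1, 64, 64)             # dummy mouth-region crops
target = torch.randn(8, 30)                    # dummy animation parameters
loss = nn.functional.mse_loss(model(images), target)
loss.backward()
```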
Lucas, Gale; Stratou, Giota; Lieblich, Shari; Gratch, Jonathan
Trust Me: Multimodal Signals of Trustworthiness Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 5–12, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{lucas_trust_2016,
title = {Trust Me: Multimodal Signals of Trustworthiness},
author = {Gale Lucas and Giota Stratou and Shari Lieblich and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993178},
doi = {10.1145/2993148.2993178},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {5–12},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {This paper builds on prior psychological studies that identify signals of trustworthiness between two human negotiators. Unlike prior work, the current work tracks such signals automatically and fuses them into computational models that predict trustworthiness. To achieve this goal, we apply automatic trackers to recordings of human dyads negotiating in a multi-issue bargaining task. We identify behavioral indicators in different modalities (facial expressions, gestures, gaze, and conversational features) that are predictive of trustworthiness. We predict both objective trustworthiness (i.e., are they honest) and perceived trustworthiness (i.e., do they seem honest to their interaction partner). Our experiments show that people are poor judges of objective trustworthiness (i.e., objective and perceived trustworthiness are predicted by different indicators), and that multimodal approaches better predict objective trustworthiness, whereas people overly rely on facial expressions when judging the honesty of their partner. Moreover, domain knowledge (from the literature and prior analysis of behaviors) facilitates the model development process.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Traum, David; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Niki and Julie: A Robot and Virtual Human for Studying Multimodal Social Interaction Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 402–403, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{artstein_niki_2016,
title = {Niki and Julie: A Robot and Virtual Human for Studying Multimodal Social Interaction},
author = {Ron Artstein and David Traum and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2998532},
doi = {10.1145/2993148.2998532},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {402–403},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {We demonstrate two agents, a robot and a virtual human, which can be used for studying factors that impact social influence. The agents engage in dialogue scenarios that build familiarity, share information, and attempt to influence a human participant. The scenarios are variants of the classical “survival task,” where members of a team rank the importance of a number of items (e.g., items that might help one survive a crash in the desert). These are ranked individually and then re-ranked following a team discussion, and the difference in ranking provides an objective measure of social influence. Survival tasks have been used in psychology, virtual human research, and human-robot interaction. Our agents are operated in a “Wizard-of-Oz” fashion, where a hidden human operator chooses the agents’ dialogue actions while interacting with an experiment participant.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark G.; Georgila, Kallirroi; Nye, Benjamin D.; Auerbach, Daniel; Liu, Zhi Fei; DiNinni, Richard
Learning, Adaptive Support, Student Traits, and Engagement in Scenario-Based Learning Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{core_learning_2016,
title = {Learning, Adaptive Support, Student Traits, and Engagement in Scenario-Based Learning},
author = {Mark G. Core and Kallirroi Georgila and Benjamin D. Nye and Daniel Auerbach and Zhi Fei Liu and Richard DiNinni},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {Scenario-based training systems pose an especially difficult challenge for an intelligent tutoring system (ITS). In addition to the basic problems of deciding when to intervene and what guidance to provide, the ITS must decide whether to give guidance directly (e.g., a hint message), indirectly through positive/negative results in the scenario, or to delay guidance until a post-scenario review session. There are a number of factors that an adaptive ITS should consider and we use self-report survey instruments to investigate the relationship between traits, learning strategies, expectations, learner behaviors derived from log files, post-use perceptions of the system, and pre-test and post-test results. We use the ELITE Lite Counseling training system as a testbed for our experiments. This system uses virtual role players to allow learners to practice leadership counseling skills, and is in use at the United States Military Academy (USMA). This paper analyzes two data sets. We collected data from local university students, a non-military population of roughly the same age as USMA Cadets using the system. For these local participants, we could administer surveys and pre-tests and post-tests, and collect log files recording clicks made while using ELITE Lite. The second data set comes from USMA itself but is limited to log files. In both populations, the ITS’s hints are effective at boosting scenario performance, and for the university students, the overall experience promoted learning, and survey results suggest that higher levels of organization in study habits may lead to greater learning with ELITE Lite. For the USMA Cadets, ELITE Lite is part of their Military Leadership course rather than an experiment, which could explain why we found higher scenario performance on average than the non-military population, and more use of the post-scenario review feature.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale; Szablowski, Evan; Gratch, Jonathan; Feng, Andrew; Huang, Tiffany; Boberg, Jill; Shapiro, Ari
The effect of operating a virtual doppleganger in a 3D simulation Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 167–174, ACM Press, Burlingame, CA, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{lucas_effect_2016,
title = {The effect of operating a virtual doppleganger in a 3D simulation},
author = {Gale Lucas and Evan Szablowski and Jonathan Gratch and Andrew Feng and Tiffany Huang and Jill Boberg and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2994258.2994263},
doi = {10.1145/2994258.2994263},
isbn = {978-1-4503-4592-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 9th International Conference on Motion in Games},
pages = {167–174},
publisher = {ACM Press},
address = {Burlingame, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. Intuition suggests that, with these new capabilities to create avatars that look like their users, every player should have his or her own avatar to play video games or simulations. We explicitly test the impact of having one’s own avatar (vs. a yoked control avatar) in a simulation (i.e., maze running task with mines). We test the impact of avatar identity on both subjective (e.g., feeling connected and engaged, liking avatar’s appearance, feeling upset when avatar’s injured, enjoying the game) and behavioral variables (e.g., time to complete task, speed, number of mines triggered, riskiness of maze path chosen). Results indicate that having an avatar that looks like the user improves their subjective experience, but there is no significant effect on how users perform in the simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ryan, James; Swanson, Reid
Recognizing Coherent Narrative Blog Content Proceedings Article
In: Proceedings of the International Conference on Interactive Digital Storytelling, pp. 234–246, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-48278-1 978-3-319-48279-8.
@inproceedings{ryan_recognizing_2016,
title = {Recognizing Coherent Narrative Blog Content},
author = {James Ryan and Reid Swanson},
url = {http://link.springer.com/10.1007/978-3-319-48279-8_21},
doi = {10.1007/978-3-319-48279-8_21},
isbn = {978-3-319-48278-1 978-3-319-48279-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the International Conference on Interactive Digital Storytelling},
pages = {234–246},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Interactive storytelling applications have at their disposal massive numbers of human-authored stories, in the form of narrative weblog posts, from which story content could be harvested and repurposed. Such repurposing is currently inhibited, however, in that many blog narratives are not sufficiently coherent for use in these applications. In a narrative that is not coherent, the order of the events in the narrative is not clear given the text of the story. We present the results of a study exploring automatic methods for estimating the coherence of narrative blog posts. In the end, our simplest model—one that only considers the degree to which story text is capitalized and punctuated—vastly outperformed a baseline model and, curiously, a series of more sophisticated models. Future work may use this simple model as a baseline, or may use it along with the classifier that it extends to automatically extract large numbers of narrative blog posts from the web for purposes such as interactive storytelling.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Li, Tianye; Li, Hao
Real-Time Facial Segmentation and Performance Capture from RGB Input Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016), pp. 244–261, Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46483-1 978-3-319-46484-8.
@inproceedings{saito_real-time_2016,
title = {Real-Time Facial Segmentation and Performance Capture from RGB Input},
author = {Shunsuke Saito and Tianye Li and Hao Li},
url = {https://link.springer.com/chapter/10.1007/978-3-319-46484-8_15},
isbn = {978-3-319-46483-1 978-3-319-46484-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016)},
pages = {244–261},
publisher = {Springer International Publishing},
address = {Amsterdam, The Netherlands},
abstract = {We introduce the concept of unconstrained real-time 3D facial performance capture through explicit semantic segmentation in the RGB input. To ensure robustness, cutting edge supervised learning approaches rely on large training datasets of face images captured in the wild. While impressive tracking quality has been demonstrated for faces that are largely visible, any occlusion due to hair, accessories, or hand-to-face gestures would result in significant visual artifacts and loss of tracking accuracy. The modeling of occlusions has been mostly avoided due to its immense space of appearance variability. To address this curse of high dimensionality, we perform tracking in unconstrained images assuming non-face regions can be fully masked out. Along with recent breakthroughs in deep learning, we demonstrate that pixel-level facial segmentation is possible in real-time by repurposing convolutional neural networks designed originally for general semantic segmentation. We develop an efficient architecture based on a two-stream deconvolution network with complementary characteristics, and introduce carefully designed training samples and data augmentation strategies for improved segmentation accuracy and robustness. We adopt a state-of-the-art regression-based facial tracking framework with segmented face images as training, and demonstrate accurate and uninterrupted facial performance capture in the presence of extreme occlusion and even side views. Furthermore, the resulting segmentation can be directly used to composite partial 3D face models on the input images and enable seamless facial manipulation tasks, such as virtual make-up or face replacement.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ruizhe; Wei, Lingyu; Vouga, Etienne; Huang, Qixing; Ceylan, Duygu; Medioni, Gerard; Li, Hao
Capturing Dynamic Textured Surfaces of Moving Targets Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016 Spotlight Presentation), Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46477-0 978-3-319-46478-7.
@inproceedings{wang_capturing_2016,
title = {Capturing Dynamic Textured Surfaces of Moving Targets},
author = {Ruizhe Wang and Lingyu Wei and Etienne Vouga and Qixing Huang and Duygu Ceylan and Gerard Medioni and Hao Li},
url = {https://link.springer.com/chapter/10.1007/978-3-319-46478-7_17},
isbn = {978-3-319-46477-0 978-3-319-46478-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 14th European Conference on Computer Vision and Pattern Recognition, (ECCV 2016 Spotlight Presentation)},
publisher = {Springer International Publishing},
address = {Amsterdam, The Netherlands},
abstract = {We present an end-to-end system for reconstructing complete watertight and textured models of moving subjects such as clothed humans and animals, using only three or four handheld sensors. The heart of our framework is a new pairwise registration algorithm that minimizes, using a particle swarm strategy, an alignment error metric based on mutual visibility and occlusion. We show that this algorithm reliably registers partial scans with as little as 15% overlap without requiring any initial correspondences, and outperforms alternative global registration algorithms. This registration algorithm allows us to reconstruct moving subjects from free-viewpoint video produced by consumer-grade sensors, without extensive sensor calibration, constrained capture volume, expensive arrays of cameras, or templates of the subject geometry.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Krum, David M.; Bolas, Mark T.
Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments Journal Article
In: ACM Transactions on Applied Perception, vol. 14, no. 2, pp. 1–17, 2016, ISSN: 15443558.
@article{jones_vertical_2016,
title = {Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments},
author = {J. Adam Jones and David M. Krum and Mark T. Bolas},
url = {http://dl.acm.org/citation.cfm?id=2983631},
doi = {10.1145/2983631},
issn = {15443558},
year = {2016},
date = {2016-10-01},
journal = {ACM Transactions on Applied Perception},
volume = {14},
number = {2},
pages = {1–17},
abstract = {In this article, we detail a series of experiments that examines the effect of vertical field-of-view extension and the addition of non-specific peripheral visual stimulation on gait characteristics and distance judgments in a head-worn virtual environment. Specifically, we examined four field-of-view configurations: a common 60° diagonal field of view (48° × 40°), a 60° diagonal field of view with the addition of a luminous white frame in the far periphery, a field of view with an extended upper edge, and a field of view with an extended lower edge. We found that extension of the field of view, either with spatially congruent or spatially non-informative visuals, resulted in improved distance judgments and changes in observed posture. However, these effects were not equal across all field-of-view configurations, suggesting that some configurations may be more appropriate than others when balancing performance, cost, and ergonomics.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Smart Mobile Virtual Characters: Video Characters vs. Animated Characters Proceedings Article
In: Proceedings of the Fourth International Conference on Human Agent Interaction, pp. 371–374, ACM Press, Biopolis, Singapore, 2016, ISBN: 978-1-4503-4508-8.
@inproceedings{kang_smart_2016,
title = {Smart Mobile Virtual Characters: Video Characters vs. Animated Characters},
author = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2980511},
doi = {10.1145/2974804.2980511},
isbn = {978-1-4503-4508-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the Fourth International Conference on Human Agent Interaction},
pages = {371–374},
publisher = {ACM Press},
address = {Biopolis, Singapore},
abstract = {This study investigates presentation techniques for a chat-based virtual human that communicates engagingly with users via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated 3D virtual character as opposed to a real human video character capable of displaying backchannel behaviors. The findings of our study demonstrate that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Feng, Andrew W.; Seymour, Mike; Shapiro, Ari
Study comparing video-based characters and 3D-based characters on mobile devices for chat Proceedings Article
In: Proceedings of the 9th International Conference on Motion in Games, pp. 181–186, ACM Press, Burlingame, California, 2016, ISBN: 978-1-4503-4592-7.
@inproceedings{kang_study_2016,
title = {Study comparing video-based characters and 3D-based characters on mobile devices for chat},
author = {Sin-Hwa Kang and Andrew W. Feng and Mike Seymour and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?id=2994274},
doi = {10.1145/2994258.2994274},
isbn = {978-1-4503-4592-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 9th International Conference on Motion in Games},
pages = {181–186},
publisher = {ACM Press},
address = {Burlingame, California},
abstract = {This study explores presentation techniques for a chat-based virtual human that communicates engagingly with users. Interactions with the virtual human occur via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with an animated virtual character as opposed to a real human video character capable of displaying realistic backchannel behaviors. An audio-only interface is compared additionally with the two types of characters. The findings of our study suggest that people are socially attracted to a 3D animated character that does not display backchannel behaviors more than a real human video character that presents realistic backchannel behaviors. People engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that exhibits realistic backchannel behaviors, compared to communicating with a real human video character that does not display backchannel behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Joshi, Himanshu; Rosenbloom, Paul S.; Ustun, Volkan
Continuous phone recognition in the Sigma cognitive architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 18, pp. 23–32, 2016, ISSN: 2212683X.
@article{joshi_continuous_2016,
title = {Continuous phone recognition in the Sigma cognitive architecture},
author = {Himanshu Joshi and Paul S. Rosenbloom and Volkan Ustun},
url = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X16300652},
doi = {10.1016/j.bica.2016.09.001},
issn = {2212683X},
year = {2016},
date = {2016-10-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {18},
pages = {23–32},
abstract = {Spoken language processing is an important capability of human intelligence that has hitherto been unexplored by cognitive architectures. This reflects on both the symbolic and sub-symbolic nature of the speech problem, and the capabilities provided by cognitive architectures to model the latter and its rich interplay with the former. Sigma has been designed to leverage the state-of-the-art hybrid (discrete + continuous) mixed (symbolic + probabilistic) capability of graphical models to provide in a uniform non-modular fashion effective forms of, and integration across, both cognitive and sub-cognitive behavior. In this article, previous work on speaker dependent isolated word recognition has been extended to demonstrate Sigma’s feasibility to process a stream of fluent audio and recognize phones, in an online and incremental manner with speaker independence. Phone recognition is an important step in integrating spoken language processing into Sigma. This work also extends the acoustic front-end used in the previous work in service of speaker independence. All of the knowledge used in phone recognition was added supraarchitecturally – i.e. on top of the architecture – without requiring the addition of new mechanisms to the architecture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bernardet, Ulysses; Chollet, Mathieu; DiPaola, Steve; Scherer, Stefan
An Architecture for Biologically Grounded Real-Time Reflexive Behavior Book Section
In: Intelligent Virtual Agents, vol. 10011, pp. 295–305, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-47664-3 978-3-319-47665-0.
@incollection{bernardet_architecture_2016,
title = {An Architecture for Biologically Grounded Real-Time Reflexive Behavior},
author = {Ulysses Bernardet and Mathieu Chollet and Steve DiPaola and Stefan Scherer},
url = {http://link.springer.com/chapter/10.1007/978-3-319-47665-0_26},
isbn = {978-3-319-47664-3 978-3-319-47665-0},
year = {2016},
date = {2016-10-01},
booktitle = {Intelligent Virtual Agents},
volume = {10011},
pages = {295–305},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {In this paper, we present a reflexive behavior architecture, that is geared towards the application in the control of the non-verbal behavior of the virtual humans in a public speaking training system. The model is organized along the distinction between behavior triggers that are internal (endogenous) to the agent, and those that origin in the environment (exogenous). The endogenous subsystem controls gaze behavior, triggers self-adaptors, and shifts between different postures, while the exogenous system controls the reaction towards auditory stimuli with different temporal and valence characteristics. We evaluate the different components empirically by letting participants compare the output of the proposed system to valid alternative variations.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Valstar, Michel; Gratch, Jonathan; Schuller, Björn; Ringeval, Fabien; Lalanne, Denis; Torres, Mercedes Torres; Scherer, Stefan; Stratou, Giota; Cowie, Roddy; Pantic, Maja
AVEC 2016: Depression, Mood, and Emotion Recognition Workshop and Challenge Proceedings Article
In: Proceedings of the 6th International Workshop on Audio/Visual Emotion Challenge, pp. 3–10, ACM Press, Amsterdam, The Netherlands, 2016, ISBN: 978-1-4503-4516-3.
@inproceedings{valstar_avec_2016,
title = {AVEC 2016: Depression, Mood, and Emotion Recognition Workshop and Challenge},
author = {Michel Valstar and Jonathan Gratch and Björn Schuller and Fabien Ringeval and Denis Lalanne and Mercedes Torres Torres and Stefan Scherer and Giota Stratou and Roddy Cowie and Maja Pantic},
url = {http://dl.acm.org/citation.cfm?id=2988258},
doi = {10.1145/2988257.2988258},
isbn = {978-1-4503-4516-3},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 6th International Workshop on Audio/Visual Emotion Challenge},
pages = {3–10},
publisher = {ACM Press},
address = {Amsterdam, The Netherlands},
abstract = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2016) "Depression, Mood and Emotion" will be the sixth competition event aimed at comparison of multimedia processing and machine learning methods for automatic audio, visual and physiological depression and emotion analysis, with all participants competing under strictly the same conditions. The goal of the Challenge is to provide a common benchmark test set for multi-modal information processing and to bring together the depression and emotion recognition communities, as well as the audio, video and physiological processing communities, to compare the relative merits of the various approaches to depression and emotion recognition under well-defined and strictly comparable conditions and establish to what extent fusion of the approaches is possible and beneficial. This paper presents the challenge guidelines, the common data used, and the performance of the baseline system on the two tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Tin, Jessica; Brown, Jeremy; Fritzsch, Elisabeth; Gabber, Shirley
Wochat Chatbot User Experience Summary Proceedings Article
In: Proceedings of the 2016 IVA: WOCHAT Workshop, Zerotype, Los Angeles, CA, 2016.
@inproceedings{gordon_wochat_2016,
title = {Wochat Chatbot User Experience Summary},
author = {Carla Gordon and Jessica Tin and Jeremy Brown and Elisabeth Fritzsch and Shirley Gabber},
url = {http://workshop.colips.org/wochat/documents/ST-281.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 IVA: WOCHAT Workshop},
publisher = {Zerotype},
address = {Los Angeles, CA},
abstract = {A team of 5 interns at the USC Institute for Creative Technologies interacted with 5 of the 6 chatbots: IRIS, Sammy, Sarah, TickTock and Joker. Unfortunately no one in our team could get the 6th chatbot, pyEliza, working. We found that there were certainly some chatbots that were better than others, and some of us were surprised by how distinct each bot felt from the others. One member commented on how they felt as though each different chatbot had an individual “voice” so to speak. Others were surprised by just how much of a “personality” the bots seemed to have. Most members of our team cited IRIS as their favorite, in terms of being capable of producing naturalistic conversation, with Sammy taking a close second. However, only one member of the team was able to interact with Sarah and TickTock, but that member cited TickTock as a capable conversation partner, and Sarah as being the best bot on a number of measures including appropriateness of responses and overall conversation cohesiveness. Therefore, perhaps if more members had been able to interact with Sarah and TickTock they may have ranked higher. Lastly, Joker was by far our least favorite, with whom no member of our team was able to have anything resembling a naturalistic or even cohesive conversation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark; Neubauer, Cathy; Passaro, Tony; Harrison, Andre; Scherer, Stefan; Khooshabeh, Pete
Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop, Los Angeles, CA, 2016.
@inproceedings{dennison_using_2016,
title = {Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task},
author = {Mark Dennison and Cathy Neubauer and Tony Passaro and Andre Harrison and Stefan Scherer and Pete Khooshabeh},
url = {http://marksdennison.com/s/DennisonPAVA2016.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop},
address = {Los Angeles, CA},
abstract = {Teams of two individuals worked together in a high-intensity simulated bomb defusing task. Half the teams were given icebreaker social time to increase comfort and familiarity with each other and the remaining half of the teams served as controls and did not meet until the task began. Electrocardiography and impedance cardiography were recorded to examine cardiac changes during task cooperation. Changes in ventricular contractility showed that individuals who had taken part in the icebreaker showed increased task engagement over time whereas controls showed the opposite. Data also trended to show that icebreaker participants were in a challenge state and controls were in a threat state during the final thirty seconds of bomb defusal. Finally, we show that a set of cardiac features can be used to classify participant data as belonging to the icebreaker or control groups with an accuracy as high as 88%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahn, Emily; Morbini, Fabrizio; Gordon, Andrew S.
Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing Proceedings Article
In: Proceedings of the 9th International Natural Language Generation Conference (INLG-2016), Edinburgh, UK, 2016.
@inproceedings{ahn_improving_2016,
title = {Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing},
author = {Emily Ahn and Fabrizio Morbini and Andrew S. Gordon},
url = {https://www.researchgate.net/publication/307512031_Improving_Fluency_in_Narrative_Text_Generation_With_Grammatical_Transformations_and_Probabilistic_Parsing},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 9th International Natural Language Generation Conference (INLG-2016)},
address = {Edinburgh, UK},
abstract = {In research on automatic generation of narrative text, story events are often formally represented as a causal graph. When serializing and realizing this causal graph as natural language text, simple approaches produce cumbersome sentences with repetitive syntactic structure, e.g. long chains of “because” clauses. In our research, we show that the fluency of narrative text generated from causal graphs can be improved by applying rule-based grammatical transformations to generate many sentence variations with equivalent semantics, then selecting the variation that has the highest probability using a probabilistic syntactic parser. We evaluate our approach by generating narrative text from causal graphs that encode 100 brief stories involving the same three characters, based on a classic film of experimental social psychology. Crowdsourced workers judged the writing quality of texts generated with ranked transformations as significantly higher than those without, and not significantly lower than human-authored narratives of the same situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Artstein, Ron; Poesio, Massimo
Inter-Coder Agreement for Computational Linguistics Journal Article
In: Computational Linguistics, vol. 34, no. 4, pp. 555–596, 2008.
@article{artstein_inter-coder_2008,
title = {Inter-Coder Agreement for Computational Linguistics},
author = {Ron Artstein and Massimo Poesio},
url = {http://ict.usc.edu/pubs/Inter-Coder%20Agreement%20for%20Computational%20Linguistics.pdf},
year = {2008},
date = {2008-12-01},
journal = {Computational Linguistics},
volume = {34},
number = {4},
pages = {555–596},
abstract = {This article is a survey of methods for measuring agreement among corpus annotators. It exposes the mathematics and underlying assumptions of agreement coefficients, covering Krippendorff's alpha as well as Scott's pi and Cohen's kappa; discusses the use of coefficients in several annotation tasks; and argues that weighted, alpha-like coefficients, traditionally less used than kappa-like measures in Computational Linguistics, may be more appropriate for many corpus annotation tasks – but that their use makes the interpretation of the value of the coefficient even harder.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
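Since the survey above centers on agreement coefficients, a brief worked example of one of them may help. This is a minimal sketch of Cohen's kappa on made-up labels, not material from the article itself.

```python
# Minimal worked example of one coefficient the survey covers, Cohen's kappa:
# kappa = (A_o - A_e) / (1 - A_e), where A_o is observed agreement and A_e is
# the agreement expected by chance from each coder's label distribution.
from collections import Counter

coder1 = ["yes", "yes", "no", "yes", "no", "no", "yes", "no"]
coder2 = ["yes", "no",  "no", "yes", "no", "yes", "yes", "no"]
n = len(coder1)

observed = sum(a == b for a, b in zip(coder1, coder2)) / n   # A_o = 0.75 here

p1 = Counter(coder1)
p2 = Counter(coder2)
labels = set(coder1) | set(coder2)
expected = sum((p1[l] / n) * (p2[l] / n) for l in labels)    # A_e = 0.5 here

kappa = (observed - expected) / (1 - expected)               # kappa = 0.5 here
print(f"A_o={observed:.3f}  A_e={expected:.3f}  kappa={kappa:.3f}")
```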
Artstein, Ron; Cannon, Jacob; Gandhe, Sudeep; Gerten, Jillian; Henderer, Joe; Leuski, Anton; Traum, David
Coherence of Off-Topic Response for a Virtual Character Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{artstein_coherence_2008,
title = {Coherence of Off-Topic Response for a Virtual Character},
author = {Ron Artstein and Jacob Cannon and Sudeep Gandhe and Jillian Gerten and Joe Henderer and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/COHERENCE%20OF%20OFF-TOPIC%20RESPONSES%20FOR%20A%20VIRTUAL%20CHARACTER.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {We demonstrate three classes of off-topic responses which allow a virtual question-answering character to handle cases where it does not understand the user's input: ask for clarification, indicate misunderstanding, and move on with the conversation. While falling short of full dialogue management, a combination of such responses together with prompts to change the topic can improve overall dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Leuski, Anton; Roque, Antonio; Gandhe, Sudeep; DeVault, David; Gerten, Jillian; Robinson, Susan; Martinovski, Bilyana
Natural Language Dialogue Architectures for Tactical Questioning Characters Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{traum_natural_2008,
title = {Natural Language Dialogue Architectures for Tactical Questioning Characters},
author = {David Traum and Anton Leuski and Antonio Roque and Sudeep Gandhe and David DeVault and Jillian Gerten and Susan Robinson and Bilyana Martinovski},
url = {http://ict.usc.edu/pubs/Natural%20Language%20Dialogue%20Architectures.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper we contrast three architectures for natural language questioning characters. We contrast the relative costs and benefits of each approach in building characters for tactical questioning. The first architecture works purely at the textual level, using cross-language information retrieval techniques to learn the best output for any input from a training set of linked questions and answers. The second architecture adds a global emotional model and computes a compliance model, which can result in different outputs for different levels, given the same inputs. The third architecture works at a semantic level and allows authoring of different policies for response for different kinds of information. We describe these architectures and their strengths and weaknesses with respect to expressive capacity, performance, and authoring demands.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Parsons, Thomas D.; Pair, Jarrell; McLay, Robert N.; Johnston, Scott; Perlman, Karen; Deal, Robert; Reger, Greg; Gahm, Greg; Roy, Michael; Shilling, Russell; Rothbaum, Barbara O.; Graap, Ken; Spitalnick, Josh; Bordnick, Patrick; Difede, JoAnn
Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{rizzo_clinical_2008,
title = {Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD},
author = {Albert Rizzo and Thomas D. Parsons and Jarrell Pair and Robert N. McLay and Scott Johnston and Karen Perlman and Robert Deal and Greg Reger and Greg Gahm and Michael Roy and Russell Shilling and Barbara O. Rothbaum and Ken Graap and Josh Spitalnick and Patrick Bordnick and JoAnn Difede},
url = {http://ict.usc.edu/pubs/Clinical%20Results%20from%20the%20Virtual%20Iraq%20Esposure%20Therapy%20Application%20for%20PTSD.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 5 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. The current paper will present the rationale and description of a VR PTSD therapy application (Virtual Iraq) and present initial findings from its use with active duty service members. Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Results from an open clinical trial using Virtual Iraq at the Naval Medical Center-San Diego with 20 treatment completers indicate that 16 no longer met PTSD diagnostic criteria at post-treatment, with only one not maintaining treatment gains at 3 month follow-up.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Hawkins, Tim; Peers, Pieter; Frederiksen, Sune; Debevec, Paul
Practical Modeling and Acquisition of Layered Facial Reflectance Journal Article
In: ACM Transaction on Graphics, vol. 27, no. 5, 2008.
@article{ghosh_practical_2008,
title = {Practical Modeling and Acquisition of Layered Facial Reflectance},
author = {Abhijeet Ghosh and Tim Hawkins and Pieter Peers and Sune Frederiksen and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Modeling%20and%20Acquisition%20of%20Layered%20Facial%20Reflectance.pdf},
year = {2008},
date = {2008-12-01},
journal = {ACM Transaction on Graphics},
volume = {27},
number = {5},
abstract = {We present a practical method for modeling layered facial reflectance consisting of specular reflectance, single scattering, and shallow and deep subsurface scattering. We estimate parameters of appropriate reflectance models for each of these layers from just 20 photographs recorded in a few seconds from a single viewpoint. We extract spatially-varying specular reflectance and single-scattering parameters from polarization-difference images under spherical and point source illumination. Next, we employ direct-indirect separation to decompose the remaining multiple scattering observed under cross-polarization into shallow and deep scattering components to model the light transport through multiple layers of skin. Finally, we match appropriate diffusion models to the extracted shallow and deep scattering components for different regions on the face. We validate our technique by comparing renderings of subjects to reference photographs recorded from novel viewpoints and under novel illumination conditions.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
McAlinden, Ryan; Bosack, Matthew; Macha, Adrian; Vargas, Esau; Walker, Tim; Mann, John; Cruz, Julio
Towards an Automated Pipeline for the Translation and Optimization of Geospatial Data for Virtual Environments Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{mcalinden_towards_2008,
title = {Towards an Automated Pipeline for the Translation and Optimization of Geospatial Data for Virtual Environments},
author = {Ryan McAlinden and Matthew Bosack and Adrian Macha and Esau Vargas and Tim Walker and John Mann and Julio Cruz},
url = {http://ict.usc.edu/pubs/Towards%20an%20Automated%20Pipeline%20for%20the%20Translation%20and%20Optimization%20of%20Geospatial%20Data%20for%20Virtual%20Environments.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {The infusion of commercial game technology into U.S. Army training, simulation, and instructional domains has resulted in more immersive and engaging experiences for Soldiers to hone their skills. However, the influx of such technology comes at a significant cost, specifically in the creation of virtual environments in which these skills are simulated and practiced. Today's typical commercial triple-A game titles cost upwards of $40-$60M and four to six years to develop, much of which is spent on producing the digital assets used to populate the scene (models, animations, etc). Additionally, this content is often suited for a custom type of rendering technology, and often cannot be reused without significant manual modification. Unfortunately, the Army has neither the financial nor the personnel resources available to create such highly immersive, reusable virtual content, nor the time to invest when current operations call for training or simulation data in a matter of hours, not months or years. In this paper, we discuss a research initiative aimed at significantly reducing the time and cost for converting, optimizing, and enhancing existing geospatial data for today's virtual environments. The goal is a completely automated process for ingesting existing military terrain data and outputting a technology-agnostic representation in less than 24 hours.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Treskunov, Anton; Sherstyuk, Andrei; Wang, Kin Lik; Pair, Jarrell
Real Binoculars with Virtual Functions for Mixed Environments Proceedings Article
In: International Conference on Advances in Computer Entertainment Technology 2008, Yokohama, Japan, 2008.
@inproceedings{treskunov_real_2008,
title = {Real Binoculars with Virtual Functions for Mixed Environments},
author = {Anton Treskunov and Andrei Sherstyuk and Kin Lik Wang and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Real%20Binoculars%20with%20Virtual%20Functions%20for%20Mixed%20Environments.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {International Conference on Advances in Computer Entertainment Technology 2008},
address = {Yokohama, Japan},
abstract = {Though often desirable, the integration of real and virtual elements in mixed reality environments can be difficult. We propose a number of techniques to facilitate scene exploration and object selection by giving users real instruments as props while implementing their functionality in a virtual part of the environment. Specifically, we present a family of tools built upon the idea of using real binoculars for viewing virtual content. This approach matches user expectations with the tool's capabilities, enhancing the sense of presence and increasing the depth of interaction between the real and virtual components of the scene. We also discuss possible applications of these tools and the results of our user study. This paper is an extended version of earlier work presented at the 4th International Workshop on the Tangible Space Initiative [5].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mower, Emily; Mataric, Maja J.; Narayanan, Shrikanth
Selection of Emotionally Salient Audio-Visual Features for Modeling Human Evaluations of Synthetic Character Emotion Displays Proceedings Article
In: Proceedings of the IEEE International Symposium on Multimedia, Berkeley, CA, 2008.
@inproceedings{mower_selection_2008,
title = {Selection of Emotionally Salient Audio-Visual Features for Modeling Human Evaluations of Synthetic Character Emotion Displays},
author = {Emily Mower and Maja J. Mataric and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Selection%20of%20Emotionally%20Salient%20Audio-Visual%20Features%20for%20Modeling%20Human%20Evaluations%20of%20Synthetic%20Character%20Emotion%20Displays.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the IEEE International Symposium on Multimedia},
address = {Berkeley, CA},
abstract = {Computer simulated avatars and humanoid robots have an increasingly prominent place in today's world. Acceptance of these synthetic characters depends on their ability to properly and recognizably convey basic emotion states to a user population. This study presents an analysis of audio-visual features that can be used to predict user evaluations of synthetic character emotion displays. These features include prosodic, spectral, and semantic properties of audio signals in addition to FACS-inspired video features [11]. The goal of this paper is to identify the audio-visual features that explain the variance in the emotional evaluations of naïve listeners through the utilization of information gain feature selection in conjunction with support vector machines. These results suggest that there exists an emotionally salient subset of the audio-visual feature space. The features that contribute most to the explanation of evaluator variance are the prior knowledge audio statistics (e.g., average valence rating), the high energy band spectral components, and the quartile pitch range. This feature subset should be correctly modeled and implemented in the design of synthetic expressive displays to convey the desired emotions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
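The abstract above names a standard technique, information-gain feature selection combined with support vector machines. The following is a minimal generic sketch of that combination in Python; the data, dimensions, and number of retained features are placeholder assumptions, not the paper's setup.

```python
# Minimal sketch of the general technique the abstract names: rank audio-visual
# features by information gain (mutual information) and train a support vector
# machine on the top-ranked subset.
import numpy as np
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 40))        # placeholder audio-visual features
y = rng.integers(0, 4, size=200)      # placeholder emotion-class evaluations

pipeline = make_pipeline(
    SelectKBest(score_func=mutual_info_classif, k=10),  # keep the 10 most informative features
    SVC(kernel="rbf"),
)
print("mean CV accuracy:", cross_val_score(pipeline, X, y, cv=5).mean())
```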
Swanson, Reid; Gordon, Andrew S.
Say Anything: A Massively Collaborative Open Domain Story Writing Companion Proceedings Article
In: First International Conference on Interactive Digital Storytelling, Erfurt, Germany, 2008.
@inproceedings{swanson_say_2008,
title = {Say Anything: A Massively Collaborative Open Domain Story Writing Companion},
author = {Reid Swanson and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Say%20Anything-%20A%20Massively%20collaborative%20Open%20Domain%20Story%20Writing%20Companion.pdf},
year = {2008},
date = {2008-11-01},
booktitle = {First International Conference on Interactive Digital Storytelling},
address = {Erfurt, Germany},
abstract = {Interactive storytelling is an interesting cross-disciplinary area that has importance in research as well as entertainment. In this paper we explore a new area of interactive storytelling that blurs the line between traditional interactive fiction and collaborative writing. We present a system where the user and computer take turns in writing sentences of a fictional narrative. Sentences contributed by the computer are selected from a collection of millions of stories extracted from Internet weblogs. By leveraging the large amounts of personal narrative content available on the web, we show that even with a simple approach our system can produce compelling stories with our users.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Han, Kyu J.; Georgiou, Panayiotis G.; Narayanan, Shrikanth
The SAIL Speaker Diarization System for Analysis of Spontaneous Meetings Proceedings Article
In: Proceedings of IEEE International Workshop on Multimedia Signal Processing (MMSP), Cairns, Australia, 2008.
@inproceedings{han_sail_2008,
title = {The SAIL Speaker Diarization System for Analysis of Spontaneous Meetings},
author = {Kyu J. Han and Panayiotis G. Georgiou and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/The%20SAIL%20Speaker%20Diarization%20System%20for%20Analysis%20of%20Spontaneous%20Meetings.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of IEEE International Workshop on Multimedia Signal Processing (MMSP)},
address = {Cairns, Australia},
abstract = {In this paper, we propose a novel approach to speaker diarization of spontaneous meetings in our own multimodal SmartRoom environment. The proposed speaker diarization system first applies a sequential clustering concept to segmentation of a given audio data source, and then performs agglomerative hierarchical clustering for speaker-specific classification (or speaker clustering) of speech segments. The speaker clustering algorithm utilizes an incremental Gaussian mixture cluster modeling strategy, and a stopping point estimation method based on information change rate. Through experiments on various meeting conversation data of approximately 200 minutes total length, this system is demonstrated to provide a diarization error rate of 18.90% on average.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pataki, Caroly; Sugar, Jeff; Kenny, Patrick G.; Parsons, Thomas D.; Rizzo, Albert; Pato, Michele; George, Cheryl St.
A Virtual Adolescent Patient with PTSD for Training Psychiatrists Proceedings Article
In: Proceedings of the 55th Annual Meeting of the American Academy of Child Adolescent Psychiatry, Chicago, IL, 2008.
@inproceedings{pataki_virtual_2008,
title = {A Virtual Adolescent Patient with PTSD for Training Psychiatrists},
author = {Caroly Pataki and Jeff Sugar and Patrick G. Kenny and Thomas D. Parsons and Albert Rizzo and Michele Pato and Cheryl St. George},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Adolescent%20Patient%20with%20PTSD%20for%20Training%20Psychiatrists.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the 55th Annual Meeting of the American Academy of Child Adolescent Psychiatry},
address = {Chicago, IL},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Pearce, Celia
Uses of Digital Enchantment: Computer Games as the New Fairy Tales Proceedings Article
In: Proceedings of the Vienna Games Conference 2008: The Future of Reality and Gaming (FROG), Vienna, Austria, 2008.
@inproceedings{morie_uses_2008,
title = {Uses of Digital Enchantment: Computer Games as the New Fairy Tales},
author = {Jacquelyn Morie and Celia Pearce},
url = {http://ict.usc.edu/pubs/The_uses_of_digital_enchantment.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the Vienna Games Conference 2008: The Future of Reality and Gaming (FROG)},
address = {Vienna, Austria},
abstract = {In this paper we argue that digital games have come to fill the cultural niche traditionally occupied by fairytales, and that they are ideally suited to realize some of the unique characteristics of this genre of folklore and literature. Arguably one of the most influential authors on game narrative and genre, J.R.R. Tolkien wrote extensively about fairytales, authored fairytales and considered his great epic work of high fantasy, "The Trilogy of the Ring," to be a fairy tale of sorts. He argued that fairytales were not about fairies per se but took place in the "realm of faerie," the magical world that fairies inhabit. "The realm of fairy-story is wide and deep and high and filled with many things: all manner of beasts and birds are found there; shoreless seas and stars uncounted; beauty that is an enchantment, and ever-present peril; both joy and sorrow as sharp as swords." [1] The "realm of faerie" provides a context for archetypal characters and narratives that express the inner life of the child and the process of transitioning to adulthood, a universal theme which has equal resonance with adults. In The Uses of Enchantment, controversial psychologist Bruno Bettelheim argues that "The motifs of fairy tales are experienced as wondrous because the child feels understood and appreciated deep down in his feelings, hopes, and anxieties, without these all having to be dragged up and investigated in the harsh light of a rationality that is still beyond him." [2] "...the internal processes are externalized and become comprehensible as represented by the figures of the story and its events." [3] These externalized processes can be seen in a wide range of digital games that put the player in the role of fairytale heroine, or more often, hero. Single-player adventure-style games such as the Zelda and Final Fantasy series, Ico, Shadow of the Colossus, Beyond Good and Evil, Okami and the Longest Journey series bring the unique affordances of the computer as a purveyor of magic to bear on this classic literary genre. Science fiction author Arthur C. Clarke famously asserted that "Any sufficiently advanced technology is indistinguishable from magic." [4] Frederick Brooks, in The Mythical Man-Month [5], brings another level of refinement to this by describing the alchemic conjuring qualities of the computer thusly: "One types the correct incantation on a keyboard and a display screen comes to life, showing things that never were nor could be." Indeed even the nomenclature of MUDs, in which programmers are referred to as "wizards," seems to confer this quality of magical enchantment to the very creators of games themselves. Given its propensity for magic, the computer is particularly well-suited as a means of expression for the fairytale genre, shifting the focus from empathy with a central character engaged in an epic journey, to endowing a player with the agency to fulfill his or her destiny. We see the trajectory of the "realm of faerie" in the tradition from Tolkien's literary masterworks to the contemporary MMOG. Tolkien's world formed the inspiration for the tabletop role-playing games of the seventies, particularly Dungeons and Dragons, which gave rise to the MUDs of the 1980s and finally the fully realized multiplayer 3D computer fantasy worlds of the 1990s to the present, and the recent release of Lord of the Rings Online.
This instrumentalization of fantasy environments through mathematical constructs provided a vital transition for the fairytale genre from the world of words to the world of numbers, and hence the world of computers. Today, the fantasy worlds of Tolkien, as well as the new fairy tales of game developers, have been rendered in their full glory via the "correct incantation on a keyboard." While it remains to be seen how or if these new digital fairytales will stand the tests of time as their literary counterparts have done, we argue that they fulfill a similar and vital role in providing today's children a sense of ritual and power in their own hero's journey from child to adulthood. References [1] Tolkien, J.R.R. (1966). The Tolkien Reader. New York: Ballantine. [2] Bettelheim, Bruno. (1975). The Uses of Enchantment: The Meaning and Importance of Fairy Tales. New York: Alfred K. Knopf. [3] Ibid [4] Clarke, Arthur C. (1962). Profiles of the Future; an Inquiry into the Limits of the Possible. New York: Harper & Row. [5] Brooks, Frederick P. (1975). The mythical man month: Essays on software engineering. Reading, MA: Addison-Wesley.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn
The Performance of the Self and Its Effect on Presence in Virtual Worlds Proceedings Article
In: Proceedings of the 11th Annual International Workshop on Presence, pp. 265–269, Padova, Italy, 2008.
@inproceedings{morie_performance_2008,
title = {The Performance of the Self and Its Effect on Presence in Virtual Worlds},
author = {Jacquelyn Morie},
url = {http://ict.usc.edu/pubs/The%20Performance%20of%20the%20Self%20and%20Its%20Effect%20on%20Presence%20in%20Virtual%20Worlds.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the 11th Annual International Workshop on Presence},
pages = {265–269},
address = {Padova, Italy},
abstract = {This paper addresses the many types of roles that people adopt within digital arenas such as online virtual worlds, and how those authored selves can enhance the sense of Self presence. Erving Goffman maintains that we play many roles in our everyday lives and that our identity is constantly being redefined by both aspects of a situation and the other people with whom we interact. With the explosion of online virtual worlds, the possibilities for such performances of self have multiplied. We now have more opportunities to explore aspects of our personalities including those that we might be reluctant to expose in real life situations. This is a new development for virtual reality: participants can create their appearance in online virtual worlds and become extremely connected to it. The potential for these personas to affect and enhance the sense of Presence should be addressed, and both quantitative and qualitative methods developed to measure their effects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; DeVault, David; Roque, Antonio; Martinovski, Bilyana; Artstein, Ron; Leuski, Anton; Gerten, Jillian; Traum, David
From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters Proceedings Article
In: Proceedings of InterSpeech, 2008.
@inproceedings{gandhe_domain_2008,
title = {From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters},
author = {Sudeep Gandhe and David DeVault and Antonio Roque and Bilyana Martinovski and Ron Artstein and Anton Leuski and Jillian Gerten and David Traum},
url = {http://ict.usc.edu/pubs/From%20Domain%20Specification%20to%20Virtual%20Humans.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of InterSpeech},
abstract = {We present a new approach for rapidly developing dialogue capabilities for virtual humans. Starting from domain specification, an integrated authoring interface automatically generates dialogue acts with all possible contents. These dialogue acts are linked to example utterances in order to provide training data for natural language understanding and generation. The virtual human dialogue system contains a dialogue manager following the information-state approach, using finite-state machines and SCXML to manage local coherence, as well as explicit modeling of emotions and compliance level and a grounding component based on evidence of understanding. Using the authoring tools, we design and implement a version of the virtual human Hassan and compare it to previous architectures for the character.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Story Management Technologies for Organizational Learning Proceedings Article
In: International Conference on Knowledge Management, Special Track on Intelligent Assistance for Self-Directed and Organizational Learning, Graz, Austria, 2008.
@inproceedings{gordon_story_2008,
title = {Story Management Technologies for Organizational Learning},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Story%20Management%20Technologies%20for%20Organizational%20Learning.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {International Conference on Knowledge Management, Special Track on Intelligent Assistance for Self-Directed and Organizational Learning},
address = {Graz, Austria},
abstract = {The stories told among members of an organization are an effective instrument for knowledge socialization, the sharing of experiences through social mechanisms. However, the utility of stories for organizational learning is limited due to the difficulties in acquiring stories that are relevant to the practices of an organization, identifying the learning goals that these stories serve, and delivering these stories to the right people at the right time in a manner that best facilitates learning. In this paper we outline a vision for story-based organizational learning in the future, and describe three areas where intelligent technologies can be applied to automate story management practices in support of organizational learning. First, we describe automated story capture technologies that identify narratives of people's experiences within the context of a larger discourse. Second, we describe automated retrieval technologies that identify stories that are relevant to specific educational needs. Third, we describe how stories can be transformed into effective story-based learning environments with minimal development costs.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels Journal Article
In: Lecture Notes in Computer Science, vol. 5208, pp. 484–485, 2008.
@article{de_melo_evolving_2008,
title = {Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20in%20Virtual%20Humans%20Using%20Lights%20and%20Pixels.pdf},
year = {2008},
date = {2008-09-01},
journal = {Lecture Notes in Computer Science},
volume = {5208},
pages = {484–485},
abstract = {Inspired by the arts, this paper addresses the challenge of expressing emotions in virtual humans using the environment's lights and the screen's pixels. An evolutionary approach is proposed which relies on genetic algorithms to learn how to map emotions into these forms of expression. The algorithm evolves populations of hypotheses, where each hypothesis represents a configuration of lighting and screen expression. Hypotheses are evaluated by a critic ensemble composed of artificial and human critics. The need for human critics is motivated by a study which reveals the limitations of an approach that relies only on artificial critics that follow principles from art literature. We also address the need for the model to improve with experience and to adapt to the individual, social and cultural values in the arts. Finally, a second study is described where subjects successfully evolved mappings for joy and sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Rizzo, Albert
Virtual Human Patients for Training of Clinical Interview and Communication Skills Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, Maia, Portugal, 2008, ISBN: 07 049 15 00 6.
@inproceedings{parsons_virtual_2008,
title = {Virtual Human Patients for Training of Clinical Interview and Communication Skills},
author = {Thomas D. Parsons and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Human%20Patients%20for%20Training%20of%20Clinical%20Interview%20and%20Communication%20Skills.pdf},
isbn = {07 049 15 00 6},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology},
address = {Maia, Portugal},
abstract = {Although schools commonly make use of standardized patients to teach interview skills, the diversity of the scenarios standardized patients can characterize is limited by availability of human actors. Virtual Human Agent technology has evolved to a point where researchers may begin developing mental health applications that make use of virtual reality patients. The work presented here is a preliminary attempt at what we believe to be a large application area. Herein we describe an ongoing study of our virtual patients. We present an approach that allows novice mental health clinicians to conduct an interview with a virtual character that emulates 1) an adolescent male with conduct disorder; and 2) an adolescent female who has recently been physically traumatized.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Envisioning With Weblogs Proceedings Article
In: International Conference on New Media Technology, Special Track on Knowledge Acquisition From the Social Web, Graz, Austria, 2008.
@inproceedings{gordon_envisioning_2008,
title = {Envisioning With Weblogs},
author = {Andrew S. Gordon and Reid Swanson},
url = {http://ict.usc.edu/pubs/Envisioning%20With%20Weblogs.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {International Conference on New Media Technology, Special Track on Knowledge Acquisition From the Social Web},
address = {Graz, Austria},
abstract = {In this position paper we present a vision of how the stories that people tell in Internet weblogs can be used directly for automated commonsense reasoning, specifically to support the core envisionment functions of event prediction, explanation, and imagination.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Gratch, Jonathan; Hartholt, Arno; Marsella, Stacy C.; Lee, Jina
Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Virtual Agents, pp. 117–130, Tokyo, Japan, 2008.
@inproceedings{traum_multi-party_2008,
title = {Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents},
author = {David Traum and Jonathan Gratch and Arno Hartholt and Stacy C. Marsella and Jina Lee},
url = {http://ict.usc.edu/pubs/Multi-party,%20Multi-issue,%20Multi-strategy%20Negotiation.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 8th International Conference on Intelligent Virtual Agents},
pages = {117–130},
address = {Tokyo, Japan},
abstract = {We present a model of negotiation for virtual agents that extends previous work to be more human-like and applicable to a broader range of situations, including more than two negotiators with different goals, and negotiating over multiple options. The agents can dynamically change their negotiating strategies based on the current values of several parameters and factors that can be updated in the course of the negotiation. We have implemented this model and done preliminary evaluation within a prototype training system and a three-party negotiation with two virtual humans and one human.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Sherstyuk, Andrei; Treskunov, Anton; Berg, Benjamin
Fast Geometry Acquisition for Mixed Reality Applications Using Motion Tracking Proceedings Article
In: 7th IEEE and ACM International Symposium on Mixed and Augmented Reality - ISMAR 2008, Cambridge, UK, 2008.
@inproceedings{sherstyuk_fast_2008,
title = {Fast Geometry Acquisition for Mixed Reality Applications Using Motion Tracking},
author = {Andrei Sherstyuk and Anton Treskunov and Benjamin Berg},
url = {http://ict.usc.edu/pubs/Fast%20Geometry%20Acquisition%20for%20Mixed%20Reality%20Applications%20Using%20Motion%20Tracking.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {7th IEEE and ACM International Symposium on Mixed and Augmented Reality - ISMAR 2008},
address = {Cambridge, UK},
abstract = {Mixing real and virtual elements into one environment often involves creating geometry models of physical objects. Traditional approaches include manual modeling by 3D artists or use of dedicated devices. Both approaches require special skills or special hardware and may be costly. We propose a new method for fast semi-automatic 3D geometry acquisition, based upon unconventional use of motion tracking equipment. The proposed method is intended for quick surface prototyping for Virtual, Augmented and Mixed reality applications where quality of visualization of objects is not required or is of low priority.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Velson, Martin
Towards Real-time Authoring of Believable Agents in Interactive Narrative Proceedings Article
In: 8th International Conference on Intelligent Virtual Agents, Tokyo, Japan, 2008.
@inproceedings{van_velson_towards_2008,
title = {Towards Real-time Authoring of Believable Agents in Interactive Narrative},
author = {Martin Velson},
url = {http://ict.usc.edu/pubs/Towards%20real%20time%20authoring%20of%20believable%20agents.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {8th International Conference on Intelligent Virtual Agents},
address = {Tokyo, Japan},
abstract = {In this paper we present an authoring tool called Narratoria that allows non-technical experts in the field of digital entertainment to create interactive narratives with 3D graphics and multimedia. Narratoria allows experts in digital entertainment to participate in the generation of story-based military training applications. Users of the tools can create story-arcs, screenplays, pedagogical goals and AI models using a single software application. Using commercial game engines, which provide direct visual output in a real-time feedback-loop, users can view the final product as they edit.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Cosand, Louise; Courtney, Chris; Iyer, Arvind; Rizzo, Albert
Neuropsychological Assessment using the Virtual Reality Cognitive Performance Assessment Test Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, 2008.
@inproceedings{parsons_neuropsychological_2008,
title = {Neuropsychological Assessment using the Virtual Reality Cognitive Performance Assessment Test},
author = {Thomas D. Parsons and Louise Cosand and Chris Courtney and Arvind Iyer and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Neurocognitive%20Workload%20Assessment%20Using%20the%20Virtual%20Reality%20Cognitive%20Performance%20Assessment%20Test.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology},
abstract = {The traditional approach to assessing neurocognitive performance makes use of paper and pencil neuropsychological assessments. This received approach has been criticized as limited in the area of ecological validity. The newly developed Virtual Reality Cognitive Performance Assessment Test (VRCPAT) focuses upon enhanced ecological validity using virtual environment scenarios to assess neurocognitive processing. The VRCPAT battery and a neuropsychological assessment were conducted with a sample of healthy adults. Findings suggest 1) good construct validity for the Memory Module; and 2) that increases in stimulus complexity and stimulus intensity can manipulate attention performance within the Attention Module.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Bolas, Mark; Lange, Belinda; Dallas, I.; Rizzo, Albert
Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system Proceedings Article
In: Virtual Rehabilitation, pp. 72, Vancouver, CA, 2008.
@inproceedings{bolas_engaging_2008,
title = {Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system},
author = {Mark Bolas and Belinda Lange and I. Dallas and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Engaging%20breathing%20exercises-%20developing%20an%20interactive%20XNA-based%20air%20flow%20sensing%20and%20control%20system.jpg},
year = {2008},
date = {2008-08-01},
booktitle = {Virtual Rehabilitation},
pages = {72},
address = {Vancouver, CA},
abstract = {The aim of this project was to make breathing exercises for children with Cystic Fibrosis fun. We developed a prototype device that uses breathing to control specifically designed video games.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Sun, Xu; Okanohara, Daisuke; Tsujii, Jun'ichi
Modeling Latent-Dynamic in Shallow Parsing: A Latent Conditional Model with Improved Inference Proceedings Article
In: The 22nd International Conference on Computational Linguistics (COLING 2008), Manchester, UK, 2008.
@inproceedings{morency_modeling_2008,
title = {Modeling Latent-Dynamic in Shallow Parsing: A Latent Conditional Model with Improved Inference},
author = {Louis-Philippe Morency and Xu Sun and Daisuke Okanohara and Jun'ichi Tsujii},
url = {http://www.ict.usc.edu/pubs/Modeling%20Latent-Dynamic%20in%20Shallow%20Parsing.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {The 22nd International Conference on Computational Linguistics (COLING 2008)},
address = {Manchester, UK},
abstract = {Shallow parsing is one of many NLP tasks that can be reduced to a sequence labeling problem. In this paper we show that the latent-dynamics (i.e., hidden substructure of shallow phrases) constitutes a problem in shallow parsing, and we show that modeling this intermediate structure is useful. By analyzing the automatically learned hidden states, we show how the latent conditional model explicitly learns latent-dynamics. We propose in this paper the Best Label Path (BLP) inference algorithm, which is able to produce the most probable label sequence on latent conditional models. It outperforms two existing inference algorithms. With the BLP inference, the LDCRF model significantly outperforms CRF models on word features, and achieves comparable performance to the most successful shallow parsers on the CoNLL data when further using part-of-speech features.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Jones, Andrew; Hawkins, Tim; Chiang, Jen-Yuan; Debevec, Paul
A high-resolution geometry capture system for facial performance Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2008.
@inproceedings{ma_high-resolution_2008,
title = {A high-resolution geometry capture system for facial performance},
author = {Wan-Chun Ma and Andrew Jones and Tim Hawkins and Jen-Yuan Chiang and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20high-resolution%20geometry%20capture%20system%20for%20facial%20performance.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {SIGGRAPH},
address = {Los Angeles, CA},
abstract = {Results: The two cameras capture data at a resolution of 2400 × 1800 (Bayer pattern). With an internal RAM storage of 12 GB, the maximum recording time is around 5 seconds. The result of each scan contains a high-resolution mesh that usually consists of 1M triangles, a smoothed medium-resolution mesh, a color texture, a world-space normal map, and a displacement map that represents the difference between the high-resolution mesh and the smoothed mesh.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Sagae, Kenji; Tsujii, Jun'ichi
Shift-reduce dependency DAG parsing Proceedings Article
In: 22nd International Conference on Computational Linguistics (Coling 2008), Manchester, UK, 2008.
@inproceedings{sagae_shift-reduce_2008,
title = {Shift-reduce dependency DAG parsing},
author = {Kenji Sagae and Jun'ichi Tsujii},
url = {http://www.ict.usc.edu/pubs/Shift-reduce%20dependency%20DAG%20parsing.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {22nd International Conference on Computational Linguistics (Coling 2008)},
address = {Manchester, UK},
abstract = {Most data-driven dependency parsing approaches assume that sentence structure is represented as trees. Although trees have several desirable properties from both computational and linguistic perspectives, the structure of linguistic phenomena that goes beyond shallow syntax often cannot be fully captured by tree representations. We present a parsing approach that is nearly as simple as current data-driven transition-based dependency parsing frameworks, but outputs directed acyclic graphs (DAGs). We demonstrate the benefits of DAG parsing in two experiments where its advantages over dependency tree parsing can be clearly observed: predicate-argument analysis of English and syntactic analysis of Danish with a representation that includes long-distance dependencies and anaphoric reference links.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Virtual Humans for Assisted Health Care Proceedings Article
In: Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings, ACM, Athens, Greece, 2008.
@inproceedings{kenny_virtual_2008-1,
title = {Virtual Humans for Assisted Health Care},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20Assisted%20Health%20Care.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings},
publisher = {ACM},
address = {Athens, Greece},
abstract = {There is a growing need for applications that can dynamically interact with aging populations to gather information, monitor their health care, provide information, or even act as companions. Virtual human agents or virtual characters offer a technology that can enable human users to overcome the confusing interfaces found in current human-computer interactions. These artificially intelligent virtual characters have speech recognition, natural language and vision that will allow human users to interact with their computers in a more natural way. Additionally, sensors may be used to monitor the environment for specific behaviors that can be fused into a virtual human system. As a result, the virtual human may respond to a patient or elderly person in a manner that will have a powerful effect on their living situation. This paper will describe the virtual human technology developed and some current applications that apply the technology to virtual patients for mental health diagnosis and clinician training. Additionally the paper will discuss possible ways in which the virtual humans may be utilized for assisted health care and for the integration of multi-modal input to enhance the virtual human system.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Solomon, Steve; Gratch, Jonathan; Bulitko, Vadim; Lent, Michael
Modeling Culturally and Emotionally Affected Behavior Proceedings Article
In: The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics., Osaka, Japan, 2008.
@inproceedings{solomon_modeling_2008,
title = {Modeling Culturally and Emotionally Affected Behavior},
author = {Steve Solomon and Jonathan Gratch and Vadim Bulitko and Michael Lent},
url = {http://www.ict.usc.edu//pubs/Modeling Culturally and Emotionally Affected Behavior.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics.},
address = {Osaka, Japan},
abstract = {Culture and emotions have a profound impact on human behavior. Consequently, high-fidelity simulated interactive environments (e.g., trainers and computer games) that involve virtual humans must model socio-cultural and emotional effects on agent behavior. In this paper we discuss two recently fielded systems that do so independently: Culturally Affected Behavior (CAB) and EMotion and Adaptation (EMA). We then propose a simple language that combines the two systems in a natural way thereby enabling simultaneous simulation of culturally and emotionally affected behavior. The proposed language is based on matrix algebra and can be easily implemented on single- or multi-core hardware with a standard matrix package (e.g., MATLAB or a C++ library). We then show how to extend the combined culture and emotion model with an explicit representation of religion and personality profiles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
An Evaluation Understudy for Dialogue Coherence Models Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, 2008.
@inproceedings{gandhe_evaluation_2008,
title = {An Evaluation Understudy for Dialogue Coherence Models},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/An%20Evaluation%20Understudy%20for%20Dialogue%20Coherence%20Models.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
abstract = {Evaluating a dialogue system is seen as a major challenge within the dialogue research community. Due to the very nature of the task, most of the evaluation methods need a substantial amount of human involvement. Following the tradition in machine translation, summarization and discourse coherence modeling, we introduce the idea of an evaluation understudy for dialogue coherence models. Following (Lapata, 2006), we use the information ordering task as a testbed for evaluating dialogue coherence models. This paper reports findings about the reliability of the information ordering task as applied to dialogues. We find that simple n-gram co-occurrence statistics similar in spirit to BLEU (Papineni et al., 2001) correlate very well with human judgments for dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Johnson, W. Lewis
The Politeness Effect in an Intelligent Foreign Language Tutoring System Proceedings Article
In: 9th International Conference on Intelligent Tutoring Systems, Montreal, Quebec, 2008.
@inproceedings{wang_politeness_2008,
title = {The Politeness Effect in an Intelligent Foreign Language Tutoring System},
author = {Ning Wang and W. Lewis Johnson},
url = {http://ict.usc.edu/pubs/The%20Politeness%20Effect.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th International Conference on Intelligent Tutoring Systems},
address = {Montreal, Quebec},
abstract = {When applying Reeves and Nass's Media Equation [22] to pedagogical agent research, we seek to focus on the manner in which a pedagogical agent communicates with learners. A previous study showed that pedagogical agents offering feedback with appropriate politeness strategies can help students learn better [23]. Another study failed to replicate this Politeness Effect in a real classroom learning environment [18]. The work presented here investigated the Politeness Effect in a foreign language intelligent tutoring system. Results show that tutorial feedback with socially intelligent strategies can influence motivation and learning outcomes, depending upon the extent to which the learning environment allows for the possibility of affecting learner motivational factors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kok, Iwan
Internship Report on Predicting Listener Backchannels Technical Report
University of Southern California Institute for Creative Technologies no. ICT-TR-02-2008, 2008.
@techreport{de_kok_internship_2008,
title = {Internship Report on Predicting Listener Backchannels},
author = {Iwan Kok},
url = {http://ict.usc.edu/pubs/ICT%20TR%2002%202008.pdf},
year = {2008},
date = {2008-06-01},
number = {ICT-TR-02-2008},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {In this report I will document the work I have done during my internship at the Institute for Creative Technologies from 22 January to 25 April under the supervision of Louis-Philippe Morency. During this time I have done research in the field of virtual humans, more specifically in the field of predicting and producing listener backchannels. But more on that later. I will start this report with a little background about the Institute for Creative Technologies and the project group which I was part of. After this the goal of my internship will be explained in Section 2. A general overview of our approach to achieving the goals set in Section 2 will be explained in Section 3. A more detailed description of the different steps taken will be given in Section 4. Following on that, the results of the conducted research will be presented in Section 5. Finally, a discussion of the work done, recommendations for improvement and future work will be given in Section 6.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
DeVault, David; Traum, David; Artstein, Ron
Practical Grammar-Based NLG from Examples Proceedings Article
In: The Fifth International Natural Language Generation Conference (INLG 2008), Salt Fork, OH, 2008.
@inproceedings{devault_practical_2008,
title = {Practical Grammar-Based NLG from Examples},
author = {David DeVault and David Traum and Ron Artstein},
url = {http://ict.usc.edu/pubs/Practical%20Grammar-Based%20NLG%20from%20Examples%20.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {The Fifth International Natural Language Generation Conference (INLG 2008)},
address = {Salt Fork, OH},
abstract = {We present a technique that opens up grammar-based generation to a wider range of practical applications by dramatically reducing the development costs and linguistic expertise that are required. Our method infers the grammatical resources needed for generation from a set of declarative examples that link surface expressions directly to the application's available semantic representations. The same examples further serve to optimize a run-time search strategy that generates the best output that can be found within an application-specific time frame. Our method offers substantially lower development costs than hand-crafted grammars for application-specific NLG, while maintaining high output quality and diversity.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Traum, David; Artstein, Ron
Making Grammar-Based Generation Easier to Deploy in Dialogue Systems Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, Columbus, OH, 2008.
@inproceedings{devault_making_2008,
title = {Making Grammar-Based Generation Easier to Deploy in Dialogue Systems},
author = {David DeVault and David Traum and Ron Artstein},
url = {http://ict.usc.edu/pubs/Making%20Grammar-Based%20Generation%20Easier%20to%20Deploy%20in%20Dialogue%20Systems%20.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
address = {Columbus, OH},
abstract = {We present a development pipeline and associated algorithms designed to make grammar-based generation easier to deploy in implemented dialogue systems. Our approach realizes a practical trade-off between the capabilities of a system's generation component and the authoring and maintenance burdens imposed on the generation content author for a deployed system. To evaluate our approach, we performed a human rating study with system builders who work on a common large-scale spoken dialogue system. Our results demonstrate the viability of our approach and illustrate authoring/performance trade-offs between hand-authored text, our grammar-based approach, and a competing shallow statistical NLG technique.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Pataki, Caroly; Pato, Michele; George, Cheryl St.; Sugar, Jeff; Rizzo, Albert
Virtual Justina: A PTSD Virtual Patient for Clinical Classroom Training Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, pp. 113–118, 2008.
@inproceedings{kenny_virtual_2008-2,
title = {Virtual Justina: A PTSD Virtual Patient for Clinical Classroom Training},
author = {Patrick G. Kenny and Thomas D. Parsons and Caroly Pataki and Michele Pato and Cheryl St. George and Jeff Sugar and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Justina-%20A%20PTSD%20Virtual%20Patient%20for%20Clinical%20Classroom%20Training.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine},
volume = {6},
pages = {113–118},
abstract = {The effects of trauma exposure manifest themselves in a wide range of symptoms: anxiety, post-traumatic stress disorder, fear, and various behavior problems. Effective interview skills are a core competency for the clinicians who will be working with children and adolescents exposed to trauma. The current project aims to improve child and adolescent psychiatry residents’ and medical students’ interviewing skills and diagnostic acumen through practice with a female adolescent virtual human with post-traumatic stress disorder. This interaction with a virtual patient provides a context where immediate feedback can be provided regarding trainees’ interviewing skills in terms of psychiatric knowledge, sensitivity, and effectiveness. Results suggest that a virtual standardized patient can generate responses that elicit user questions relevant for PTSD categorization. We conclude with a discussion of the ways in which these capabilities allow virtual patients to serve as unique training tools whose special knowledge and reactions can be continually fed back to trainees. Our initial goal is to focus on a virtual patient with PTSD, but a similar strategy could be applied to teaching a broad variety of psychiatric diagnoses to trainees at every level from medical students, to psychiatry residents, to child and adolescent psychiatry residents.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Blascovich, James J.; Chemers, Martin M.; Hunt, Earl; Ilgen, Daniel R.; Larsen, Randy L.; Mayer, Richard E.; O'Neil, Harold Jr.; McLaughlin, Alan J.; Patel, Vilma L.; Quiñones, Miguel A.; Simons, Anna
Human Behavior in Military Contexts Book
The National Academies Press, 2008.
@book{gratch_human_2008,
title = {Human Behavior in Military Contexts},
author = {Jonathan Gratch and James J. Blascovich and Martin M. Chemers and Earl Hunt and Daniel R. Ilgen and Randy L. Larsen and Richard E. Mayer and Harold Jr. O'Neil and Alan J. McLaughlin and Vilma L. Patel and Miguel A. Quiñones and Anna Simons},
url = {http://www.ict.usc.edu/pubs/Human%20Behavior%20in%20Military%20Contexts.pdf},
year = {2008},
date = {2008-06-01},
publisher = {The National Academies Press},
abstract = {Human behavior forms the nucleus of military effectiveness. Humans operating in the complex military system must possess the knowledge, skills, abilities, aptitudes, and temperament to perform their roles effectively in a reliable and predictable manner, and effective military management requires understanding of how these qualities can be best provided and assessed. Scientific research in this area is critical to understanding leadership, training and other personnel issues, social interactions and organizational structures within the military. The U.S. Army Research Institute for the Behavioral and Social Sciences (ARI) asked the National Research Council to provide an agenda for basic behavioral and social research focused on applications in both the short and long-term. The committee responded by recommending six areas of research on the basis of their relevance, potential impact, and timeliness for military needs: intercultural competence; teams in complex environments; technology-based training; nonverbal behavior; emotion; and behavioral neurophysiology. The committee suggests doubling the current budget for basic research for the behavioral and social sciences across U.S. military research agencies. The additional funds can support approximately 40 new projects per year across the committee's recommended research areas. Human Behavior in Military Contexts includes committee reports and papers that demonstrate areas of stimulating, ongoing research in the behavioral and social sciences that can enrich the military's ability to recruit, train, and enhance the performance of its personnel, both organizationally and in its many roles in other cultures.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {book}
}
Roque, Antonio; Traum, David
Degrees of Grounding Based on Evidence of Understanding Proceedings Article
In: 9th SIGdial Workshop on Discourse and Dialogue, Columbus, OH, 2008.
@inproceedings{roque_degrees_2008,
title = {Degrees of Grounding Based on Evidence of Understanding},
author = {Antonio Roque and David Traum},
url = {http://ict.usc.edu/pubs/Degrees%20of%20Grounding%20Based%20on%20Evidence%20of%20Understanding.pdf},
year = {2008},
date = {2008-06-01},
booktitle = {9th SIGdial Workshop on Discourse and Dialogue},
address = {Columbus, OH},
abstract = {We introduce the Degrees of Grounding model, which defines the extent to which material being discussed in a dialogue has been grounded. This model has been developed and evaluated by a corpus analysis, and includes a set of types of evidence of understanding, a set of degrees of groundedness, a set of grounding criteria, and methods for identifying each of these. We describe how this model can be used for dialogue management.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Thiebaux, Marcus; Marshall, Andrew; Marsella, Stacy C.; Kallmann, Marcelo
SmartBody: Behavior Realization for Embodied Conversational Agents Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Estoril, Portugal, 2008.
@inproceedings{thiebaux_smartbody_2008,
title = {SmartBody: Behavior Realization for Embodied Conversational Agents},
author = {Marcus Thiebaux and Andrew Marshall and Stacy C. Marsella and Marcelo Kallmann},
url = {http://ict.usc.edu/pubs/SmartBody-%20Behavior%20Realization%20for%20Embodies%20Conversational%20Agents.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Estoril, Portugal},
abstract = {Researchers demand much from their embodied conversational agents (ECA), requiring them to be both life-like, as well as responsive to events in an interactive setting. We find that a flexible combination of animation approaches may be needed to satisfy these needs. In this paper we present SmartBody, an open source modular framework for animating ECAs in real time, based on the notion of hierarchically connected animation controllers. Controllers in SmartBody can employ arbitrary animation algorithms such as keyframe interpolation, motion capture or procedural animation. Controllers can also schedule or combine other controllers. We discuss our architecture in detail, including how we incorporate traditional approaches, and develop the notion of a controller as a reactive module within a generic framework for realizing modular animation control. To illustrate the versatility of the architecture, we also discuss a range of applications that have used SmartBody successfully.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gandhe, Sudeep; Leuski, Anton; Traum, David
Field Testing of an Interactive Question-Answering Character Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
@inproceedings{artstein_field_2008,
title = {Field Testing of an Interactive Question-Answering Character},
author = {Ron Artstein and Sudeep Gandhe and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Field%20Testing%20of%20an%20Interactive%20Question-Answering%20Character%20.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Marrakech, Morocco},
abstract = {We tested a life-size embodied question-answering character at a convention where he responded to questions from the audience. The character's responses were then rated for coherence. The ratings, combined with speech transcripts, speech recognition results and the character's responses, allowed us to identify where the character needs to improve, namely in speech recognition and providing off-topic responses.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Marsella, Stacy C.; Hawkins, Tim
Individual Differences in Expressive Response: A Challenge for ECA Design Proceedings Article
In: 7th International Conference on Autonomous Agents and Multiagent Systems, Estoril, Portugal, 2008.
@inproceedings{wang_individual_2008,
title = {Individual Differences in Expressive Response: A Challenge for ECA Design},
author = {Ning Wang and Stacy C. Marsella and Tim Hawkins},
url = {http://ict.usc.edu/pubs/Individual%20Differences%20in%20Expressive%20Response.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {7th International Conference on Autonomous Agents and Multiagent Systems},
address = {Estoril, Portugal},
abstract = {To create realistic and expressive virtual humans, we need to develop better models of the processes and dynamics of human emotions and expressions. A first step in this effort is to develop means to systematically induce and capture realistic expressions in real humans. We conducted a series of studies on human emotions and facial expression using the Emotion Evoking Game (EVG) and a high-speed video camera. In this paper, we discuss a detailed analysis of facial expressions in response to a surprise situation. We provide details on the rich dynamics of facial expressions, along with data useful for animation of virtual humans. The analysis of the data also revealed considerable individual differences in whether surprise was evoked and how it was expressed.},
keywords = {Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; DeVault, David; Marsella, Stacy C.; Traum, David
Thoughts on FML: Behavior Generation in the Virtual Human Communication Architecture Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) First Functional Markup Language Workshop, Estoril, Portugal, 2008.
@inproceedings{lee_thoughts_2008,
title = {Thoughts on FML: Behavior Generation in the Virtual Human Communication Architecture},
author = {Jina Lee and David DeVault and Stacy C. Marsella and David Traum},
url = {http://ict.usc.edu/pubs/Thoughts%20on%20FML.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) First Functional Markup Language Workshop},
address = {Estoril, Portugal},
abstract = {We discuss our current architecture for the generation of natural language and non-verbal behavior in ICT virtual humans. We draw on our experience developing this architecture to present our current perspective on several issues related to the standardization of FML and to the SAIBA framework more generally. In particular, we discuss our current use, and non-use, of FML-inspired representations in generating natural language, eye gaze, and emotional displays. We also comment on some of the shortcomings of our design as currently implemented.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Velson, Martin
Narratoria, an Authoring Suite for Digital Interactive Narrative Proceedings Article
In: The Florida Artificial Intelligence Research Society, Key West, FL, 2008.
@inproceedings{van_velson_narratoria_2008,
title = {Narratoria, an Authoring Suite for Digital Interactive Narrative},
author = {Martin Velson},
url = {http://ict.usc.edu/pubs/Narratoria,%20an%20Authoring%20Suite%20for%20Digital%20Interactive%20Narrative.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {The Florida Artificial Intelligence Research Society},
address = {Key West, FL},
abstract = {In this paper we present an authoring tool called Narratoria that allows non-technical experts in the field of digital entertainment to create interactive narratives with 3D graphics and multimedia. Narratoria allows experts in digital entertainment to participate in the generation of story-based military training applications. Users of the tools can create story-arcs, screenplays, pedagogical goals and AI models using a single software application. Using game engines, which provide direct visual output in a real-time feedback-loop, users can view the final product as they edit.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manshadi, Mehdi; Swanson, Reid; Gordon, Andrew S.
Learning a Probabilistic Model of Event Sequences From Internet Weblog Stories Proceedings Article
In: 21st Conference of the Florida AI Society, Applied Natural Language Processing Track, Coconut Grove, FL, 2008.
@inproceedings{manshadi_learning_2008,
title = {Learning a Probabilistic Model of Event Sequences From Internet Weblog Stories},
author = {Mehdi Manshadi and Reid Swanson and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Learning%20a%20Probabilistic%20Model%20of%20Event%20Sequences%20From%20Internet%20Weblog%20Stories.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {21st Conference of the Florida AI Society, Applied Natural Language Processing Track},
address = {Coconut Grove, FL},
abstract = {One of the central problems in building broad-coverage story understanding systems is generating expectations about event sequences, i.e. predicting what happens next given some arbitrary narrative context. In this paper, we describe how a large corpus of stories extracted from Internet weblogs was used to learn a probabilistic model of event sequences using statistical language modeling techniques. Our approach was to encode weblog stories as sequences of events, one per sentence in the story, where each event was represented as a pair of descriptive key words extracted from the sentence. We then applied statistical language modeling techniques to each of the event sequences in the corpus. We evaluated the utility of the resulting model for the tasks of narrative event ordering and event prediction.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
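The abstract above describes the modeling approach only at a high level: each weblog story is encoded as a sequence of events (one per sentence, each event a pair of descriptive keywords), and a statistical language model is trained over those event sequences to support event ordering and prediction. As a rough, hypothetical illustration of that idea — not the authors' implementation, with all names and toy data invented — a minimal bigram event model in Python might look like this:

    # Minimal sketch: a bigram language model over keyword-pair "events".
    # Purely illustrative; the paper's actual models and data differ.
    from collections import defaultdict

    class EventBigramModel:
        def __init__(self):
            self.bigram_counts = defaultdict(lambda: defaultdict(int))
            self.unigram_counts = defaultdict(int)
            self.vocab = set()

        def train(self, stories):
            """stories: iterable of event sequences, each event a (keyword, keyword) tuple."""
            for events in stories:
                padded = [("<s>", "<s>")] + list(events)
                for prev, curr in zip(padded, padded[1:]):
                    self.bigram_counts[prev][curr] += 1
                    self.unigram_counts[prev] += 1
                    self.vocab.add(curr)

        def prob(self, prev, curr):
            """Add-one smoothed P(curr | prev)."""
            num = self.bigram_counts[prev][curr] + 1
            den = self.unigram_counts[prev] + len(self.vocab)
            return num / den

        def sequence_score(self, events):
            """Product of bigram probabilities; can be used to compare candidate event orderings."""
            score = 1.0
            padded = [("<s>", "<s>")] + list(events)
            for prev, curr in zip(padded, padded[1:]):
                score *= self.prob(prev, curr)
            return score

        def predict_next(self, prev, k=3):
            """Return the k most likely next events given the previous event."""
            return sorted(self.vocab, key=lambda e: self.prob(prev, e), reverse=True)[:k]

    # Toy usage with invented weblog-style event sequences (one event per sentence):
    stories = [
        [("wake", "morning"), ("drive", "work"), ("attend", "meeting")],
        [("wake", "morning"), ("miss", "bus"), ("arrive", "late")],
    ]
    model = EventBigramModel()
    model.train(stories)
    print(model.predict_next(("wake", "morning")))

The same scoring function can rank alternative orderings of a held-out story's events, which is the spirit of the narrative event ordering evaluation the abstract mentions.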
Hobbs, Jerry R.; Gordon, Andrew S.
The Deep Lexical Semantics of Emotions Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC) Workshop on Sentiment Analysis: Emotion, Metaphor, Ontology and Terminology (EMOT), Marrakech, Morocco, 2008.
@inproceedings{hobbs_deep_2008,
title = {The Deep Lexical Semantics of Emotions},
author = {Jerry R. Hobbs and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Deep%20Lexical%20Semantics%20of%20Emotions.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC) Workshop on Sentiment Analysis: Emotion, Metaphor, Ontology and Terminology (EMOT)},
address = {Marrakech, Morocco},
abstract = {We understand discourse so well because we know so much. If we are to have natural language understanding systems that are able to deal with texts with emotional content, we must encode knowledge of human emotions for use in the systems. In particular, we must equip the system with a formal version of people's implicit theory of how emotions mediate between what they experience and what they do, and rules that link the theory with words and phrases in the emotional lexicon. The effort we describe here is part of a larger project in knowledge-based natural language understanding to construct a collection of abstract and concrete core formal theories of fundamental phenomena, geared to language, and to define or at least characterize the most common words in English in terms of these theories (Hobbs, 2008). One collection of theories we have put a considerable amount of work into is a commonsense theory of human cognition, or how people think they think (Hobbs and Gordon, 2005). A formal theory of emotions is an important piece of this. In this paper we describe this theory and our efforts to define a number of the most common words about emotions in terms of this and other theories. Vocabulary related to emotions has been studied extensively within the field of linguistics, with particular attention to cross-cultural differences (Athanasiadou and Tabakowska, 1998; Harkins and Wierzbicka, 2001; Wierzbicka, 1999). Within computational linguistics, there has been recent interest in creating large-scale text corpora where expressions of emotion and other private states are annotated (Wiebe et al., 2005). In Section 2 we describe Core WordNet and our categorization of it to determine the most frequent words about cognition and emotion. In Section 3 we describe an effort to flesh out the emotional lexicon by searching a large corpus for emotional terms, so we can have some assurance of high coverage in both the core theory and the lexical items linked to it. In Section 4 we sketch the principal facets of some of the core theories. In Section 5 we describe the theory of Emotion with several examples of words characterized in terms of the theories.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Fullerton, Tracy; Morie, Jacquelyn; Pearce, Celia
A Game of One's Own: Towards a New Gendered Poetics of Digital Space Proceedings Article
In: Fibreculture Journal: Internet Theory, Criticism and Research, Perth, Australia, 2008.
@inproceedings{fullerton_game_2008,
title = {A Game of One's Own: Towards a New Gendered Poetics of Digital Space},
author = {Tracy Fullerton and Jacquelyn Morie and Celia Pearce},
url = {http://ict.usc.edu/pubs/A%20Game%20of%20One%E2%80%99s%20Own-%20Towards%20a%20New%20Gendered%20Poetics%20of%20Digital%20Space.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {Fibreculture Journal: Internet Theory, Criticism and Research},
volume = {11},
address = {Perth, Australia},
abstract = {The techno-fetishism of computer game culture has led to a predominantly male sensibility towards the construction of space in digital entertainment. Real-time strategy games conceive of space as a domain to be conquered; first-person shooters create labyrinthine battlefields in which space becomes a context for combat. Massively multiplayer games offer the opportunity for non-linear exploration, but emphasize linear achievement within a combat-based narrative. In this paper, we argue for a new gendered, regendered and perhaps degendered poetics of game space, rethinking ways in which space is conceptualized and represented as a domain for play. We argue for a more egalitarian virtual playground that acknowledges and embraces a wider range of spatial and cognitive models, referencing literature, philosophy, fine art and non-digital games for inspiration. Reflecting on a variety of sources, beginning with Virginia Woolf's A Room of One's Own and Bachelard's Poetics of Space, feminist writings of Charlotte Perkins Gilman, Simone de Beauvoir, Hélène Cixous, Judith Butler, Janet Murray, and including contemporary game writers such as Lizbeth Klastrup, Mary Flanagan, Maia Engeli, and T.L. Taylor, we will argue for a new gendered poetics of game space, proposing an inclusionary approach that integrates feminine conceptions of space into the gaming landscape.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Robinson, Susan; Traum, David; Ittycheriah, Midhun; Henderer, Joe
What would you ask a Conversational Agent? Observations of Human-Agent Dialogues in a Museum Setting Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
@inproceedings{robinson_what_2008,
title = {What would you ask a Conversational Agent? Observations of Human-Agent Dialogues in a Museum Setting},
author = {Susan Robinson and David Traum and Midhun Ittycheriah and Joe Henderer},
url = {http://ict.usc.edu/pubs/What%20would%20you%20ask%20a%20conversational%20agent.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Marrakech, Morocco},
abstract = {Embodied Conversational Agents have typically been constructed for use in limited domain applications, and tested in very specialized environments. Only in recent years have there been more cases of moving agents into wider public applications (e.g. Bell et al., 2003; Kopp et al., 2005). Yet little analysis has been done to determine the differing needs, expectations, and behavior of human users in these environments. With an increasing trend for virtual characters to "go public", we need to expand our understanding of what this entails for the design and capabilities of our characters. This paper explores these issues through an analysis of a corpus that has been collected since December 2006, from interactions with the virtual character Sgt Blackwell at the Cooper Hewitt Museum in New York. The analysis includes 82 hierarchical categories of user utterances, as well as specific observations on user preferences and behaviors drawn from interactions with Blackwell.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Russ, Thomas; Traum, David; Hovy, Eduard; Robinson, Susan
A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
@inproceedings{hartholt_common_2008,
title = {A Common Ground for Virtual Humans: Using an Ontology in a Natural Language Oriented Virtual Human Architecture},
author = {Arno Hartholt and Thomas Russ and David Traum and Eduard Hovy and Susan Robinson},
url = {http://ict.usc.edu/pubs/A%20Common%20Ground%20for%20Virtual%20Humans-%20Using%20an%20Ontology%20in%20a%20Natural%20Language%20Oriented%20Virtual%20Human%20Architecture.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Marrakech, Morocco},
abstract = {When dealing with large, distributed systems that use state-of-the-art components, individual components are usually developed in parallel. As development continues, the decoupling invariably leads to a mismatch between how these components internally represent concepts and how they communicate these representations to other components: representations can get out of synch, contain localized errors, or become manageable only by a small group of experts for each module. In this paper, we describe the use of an ontology as part of a complex distributed virtual human architecture in order to enable better communication between modules while improving the overall flexibility needed to change or extend the system. We focus on the natural language understanding capabilities of this architecture and the relationship between language and concepts within the entire system in general and the ontology in particular.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Poesio, Massimo; Artstein, Ron
Anaphoric Annotation in the ARRAU Corpus Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco, 2008.
@inproceedings{poesio_anaphoric_2008,
title = {Anaphoric Annotation in the ARRAU Corpus},
author = {Massimo Poesio and Ron Artstein},
url = {http://ict.usc.edu/pubs/Anaphoric%20Annotation%20in%20the%20ARRAU%20Corpus.pdf},
year = {2008},
date = {2008-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Marrakech, Morocco},
abstract = {ARRAU is a new corpus annotated for anaphoric relations, with information about agreement and explicit representation of multiple antecedents for ambiguous anaphoric expressions and discourse antecedents for expressions which refer to abstract entities such as events, actions and plans. The corpus contains texts from different genres: task-oriented dialogues from the Trains-91 and Trains-93 corpora, narratives from the English Pear Stories corpus, newspaper articles from the Wall Street Journal portion of the Penn Treebank, and mixed text from the Gnome corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hendler, James; Cimiano, Philipp; Dolgov, Dmitri; Levin, Anat; Mika, Peter; Milch, Brian; Morency, Louis-Philippe; Motik, Boris; Neville, Jennifer; Sudderth, Erik B.; von Ahn, Luis
AI's 10 to Watch Journal Article
In: IEEE Intelligent Systems, vol. 23, no. 3, pp. 9–19, 2008.
@article{hendler_ais_2008,
title = {AI's 10 to Watch},
author = {James Hendler and Philipp Cimiano and Dmitri Dolgov and Anat Levin and Peter Mika and Brian Milch and Louis-Philippe Morency and Boris Motik and Jennifer Neville and Erik B. Sudderth and Luis von Ahn},
url = {http://ict.usc.edu/pubs/AI's%2010%20to%20Watch.pdf},
year = {2008},
date = {2008-05-01},
journal = {IEEE Intelligent Systems},
volume = {23},
number = {3},
pages = {9–19},
abstract = {The recipients of the 2008 IEEE Intelligent Systems 10 to Watch award—Philipp Cimiano, Dmitri Dolgov, Anat Levin, Peter Mika, Brian Milch, Louis-Philippe Morency, Boris Motik, Jennifer Neville, Erik Sudderth, and Luis von Ahn—discuss their current research and their visions of AI for the future.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Solomon, Steve; van Lent, Michael; Core, Mark; Carpenter, Paul; Rosenberg, Milton
A Language for Modeling Cultural Norms, Biases and Stereotypes for Human Behavior Models Technical Report
2008.
@techreport{solomon_language_2008,
title = {A Language for Modeling Cultural Norms, Biases and Stereotypes for Human Behavior Models},
author = {Steve Solomon and Michael van Lent and Mark Core and Paul Carpenter and Milton Rosenberg},
url = {http://ict.usc.edu/pubs/A%20Language%20for%20Modeling%20Cultural%20Norms,%20Biases%20and%20Stereotypes%20for%20Human%20Behavior%20Models.pdf},
year = {2008},
date = {2008-04-01},
abstract = {Increasingly, the military has requirements for teaching cultural awareness, which demands flexible representations of cultural knowledge. The Culturally-Affected Behavior project seeks to define a language for encoding ethnographic data in order to capture cultural knowledge and use that knowledge to affect human behavior models. Having anthropologists encode ethnographic data will validate the language and will result in a library of culture models for immersive training.},
keywords = {Learning Sciences},
pubstate = {published},
tppubtype = {techreport}
}
Swanson, Reid; Chew, Elaine; Gordon, Andrew S.
Supporting Musical Creativity With Unsupervised Syntactic Parsing Proceedings Article
In: AAAI Spring Symposium Series, Stanford University, 2008.
@inproceedings{swanson_supporting_2008,
title = {Supporting Musical Creativity With Unsupervised Syntactic Parsing},
author = {Reid Swanson and Elaine Chew and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Supporting%20Musical%20Creativity%20With%20Unsupervised%20Syntactic%20Parsing.pdf},
year = {2008},
date = {2008-03-01},
booktitle = {AAAI Spring Symposium Series},
address = {Stanford University},
abstract = {Music and language are two human activities that fit well with a traditional notion of creativity and are particularly suited to computational exploration. In this paper we will argue for the necessity of syntactic processing in musical applications. Unsupervised methods offer uniquely interesting approaches to supporting creativity. We will demonstrate, using the Constituent Context Model, that the syntactic structure of musical melodies can be learned automatically without annotated training data. Using a corpus built from the Well-Tempered Clavier by Bach, we describe a simple classification experiment that shows the relative quality of the induced parse trees for musical melodies.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}