Publications
Metallinou, Angeliki; Lee, Sungbok; Narayanan, Shrikanth
Audio-Visual Emotion Recognition using Gaussian Mixture Models for Face and Voice Proceedings Article
In: Proceedings of the IEEE International Symposium on Multimedia, pp. 250–257, Berkeley, CA, 2008.
@inproceedings{metallinou_audio-visual_2008,
title = {Audio-Visual Emotion Recognition using Gaussian Mixture Models for Face and Voice},
author = {Angeliki Metallinou and Sungbok Lee and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Audio-Visual%20Emotion%20Recognition%20using%20Gaussian%20Mixture%20Models%20for%20Face%20and%20Voice.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the IEEE International Symposium on Multimedia},
pages = {250–257},
address = {Berkeley, CA},
abstract = {Emotion expression associated with human communication is known to be a multimodal process. In this work, we investigate the way that emotional information is conveyed by facial and vocal modalities, and how these modalities can be effectively combined to achieve improved emotion recognition accuracy. In particular, the behaviors of different facial regions are studied in detail. We analyze an emotion database recorded from ten speakers (five female, five male), which contains speech and facial marker data. Each individual modality is modeled by Gaussian Mixture Models (GMMs). Multiple modalities are combined using two different methods: a Bayesian classifier weighting scheme and support vector machines that use post classification accuracies as features. Individual modality recognition performances indicate that anger and sadness have comparable accuracies for facial and vocal modalities, while happiness seems to be more accurately transmitted by facial expressions than voice. The neutral state has the lowest performance, possibly due to the vague definition of neutrality. Cheek regions achieve better emotion recognition accuracy compared to other facial regions. Moreover, classifier combination leads to significantly higher performance, which confirms that training detailed single modality classifiers and combining them at a later stage is an effective approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
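The abstract above outlines a concrete recipe: fit one GMM per emotion class for each modality, then fuse the modalities' class posteriors with a weighted combination. The sketch below is a minimal illustration of that scheme under stated assumptions, not the authors' code; the feature arrays are synthetic stand-ins for the speech and facial-marker features, and the fusion weights are arbitrary placeholders (the paper derives its weights from single-modality classification performance).

import numpy as np
from sklearn.mixture import GaussianMixture

EMOTIONS = ["anger", "happiness", "sadness", "neutral"]

def train_modality_gmms(features_by_emotion, n_components=4):
    """Fit one GMM per emotion class for a single modality."""
    return {emo: GaussianMixture(n_components=n_components,
                                 covariance_type="diag", random_state=0).fit(X)
            for emo, X in features_by_emotion.items()}

def class_posteriors(gmms, X):
    """Class posteriors (uniform priors) from average per-frame log-likelihoods."""
    loglik = np.array([gmms[emo].score_samples(X).mean() for emo in EMOTIONS])
    p = np.exp(loglik - loglik.max())          # softmax, shifted for stability
    return p / p.sum()

# Synthetic stand-ins: 100 frames of 10-dim "voice" and 30-dim "face" features per class.
rng = np.random.default_rng(0)
voice = {emo: rng.normal(i, 1.0, size=(100, 10)) for i, emo in enumerate(EMOTIONS)}
face = {emo: rng.normal(i, 1.0, size=(100, 30)) for i, emo in enumerate(EMOTIONS)}
voice_gmms = train_modality_gmms(voice)
face_gmms = train_modality_gmms(face)

# Fusion: weighted sum of per-modality posteriors (weights are arbitrary here).
test_voice = rng.normal(1.0, 1.0, size=(50, 10))   # frames near the "happiness" class
test_face = rng.normal(1.0, 1.0, size=(50, 30))
fused = 0.4 * class_posteriors(voice_gmms, test_voice) \
      + 0.6 * class_posteriors(face_gmms, test_face)
print("predicted emotion:", EMOTIONS[int(np.argmax(fused))])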
Morency, Louis-Philippe
Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{morency_real-time_2008,
title = {Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model},
author = {Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/REAL-TIME%20HEAD%20POSE%20ESTIMATION%20USING%20A%20WEBCAM-%20MONOCULAR%20ADAPTIVE%20VIEW-BASED%20APPEARANCE%20MODEL.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Accurately estimating a person's head position and orientation is an important task for a wide range of applications such as driver awareness and human-robot interaction. Over the past two decades, many approaches have been suggested to solve this problem, each with its own advantages and disadvantages. In this paper, we present a probabilistic framework called Monocular Adaptive View-based Appearance Model (MAVAM) which integrates the advantages from two of these approaches: (1) the relative precision and user-independence of differential registration, and (2) the robustness and bounded drift of keyframe tracking. In our experiments, we show how the MAVAM model can be used to estimate head position and orientation in real-time using a simple monocular camera. Our experiments on two previously published datasets show that the MAVAM framework can accurately track for a long period of time (>2 minutes) with an average accuracy of 3.9 degrees and 1.2 in compared with an inertial sensor and a 3D magnetic sensor.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Poesio, Massimo
Inter-Coder Agreement for Computational Linguistics Journal Article
In: Computational Linguistics, vol. 34, no. 4, pp. 555–596, 2008.
@article{artstein_inter-coder_2008,
title = {Inter-Coder Agreement for Computational Linguistics},
author = {Ron Artstein and Massimo Poesio},
url = {http://ict.usc.edu/pubs/Inter-Coder%20Agreement%20for%20Computational%20Linguistics.pdf},
year = {2008},
date = {2008-12-01},
journal = {Computational Linguistics},
volume = {34},
number = {4},
pages = {555–596},
abstract = {This article is a survey of methods for measuring agreement among corpus annotators. It exposes the mathematics and underlying assumptions of agreement coefficients, covering Krippendorff's alpha as well as Scott's pi and Cohen's kappa; discusses the use of coefficients in several annotation tasks; and argues that weighted, alpha-like coefficients, traditionally less used than kappa-like measures in Computational Linguistics, may be more appropriate for many corpus annotation tasks – but that their use makes the interpretation of the value of the coefficient even harder.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
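Since the article's subject is the arithmetic of these coefficients, a worked example may help. The sketch below is my own illustration, not code from the article: both Scott's pi and Cohen's kappa take the form (Ao - Ae) / (1 - Ae), where Ao is observed agreement and Ae is expected (chance) agreement; pi estimates Ae from a single label distribution pooled over both coders, while kappa keeps a separate distribution per coder. The annotation labels are invented.

from collections import Counter

def observed_agreement(a, b):
    return sum(x == y for x, y in zip(a, b)) / len(a)

def scotts_pi(a, b):
    n = len(a)
    pooled = Counter(a) + Counter(b)           # one distribution for both coders
    a_e = sum((c / (2 * n)) ** 2 for c in pooled.values())
    a_o = observed_agreement(a, b)
    return (a_o - a_e) / (1 - a_e)

def cohens_kappa(a, b):
    n = len(a)
    ca, cb = Counter(a), Counter(b)            # per-coder distributions
    a_e = sum((ca[k] / n) * (cb[k] / n) for k in set(a) | set(b))
    a_o = observed_agreement(a, b)
    return (a_o - a_e) / (1 - a_e)

coder1 = ["stat", "ireq", "stat", "chck", "stat", "stat"]
coder2 = ["stat", "ireq", "chck", "chck", "stat", "ireq"]
print(f"pi = {scotts_pi(coder1, coder2):.3f}, "
      f"kappa = {cohens_kappa(coder1, coder2):.3f}")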
Artstein, Ron; Cannon, Jacob; Gandhe, Sudeep; Gerten, Jillian; Henderer, Joe; Leuski, Anton; Traum, David
Coherence of Off-Topic Responses for a Virtual Character Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{artstein_coherence_2008,
title = {Coherence of Off-Topic Responses for a Virtual Character},
author = {Ron Artstein and Jacob Cannon and Sudeep Gandhe and Jillian Gerten and Joe Henderer and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/COHERENCE%20OF%20OFF-TOPIC%20RESPONSES%20FOR%20A%20VIRTUAL%20CHARACTER.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {We demonstrate three classes of off-topic responses which allow a virtual question-answering character to handle cases where it does not understand the user's input: ask for clarification, indicate misunderstanding, and move on with the conversation. While falling short of full dialogue management, a combination of such responses together with prompts to change the topic can improve overall dialogue coherence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Leuski, Anton; Roque, Antonio; Gandhe, Sudeep; DeVault, David; Gerten, Jillian; Robinson, Susan; Martinovski, Bilyana
Natural Language Dialogue Architectures for Tactical Questioning Characters Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{traum_natural_2008,
title = {Natural Language Dialogue Architectures for Tactical Questioning Characters},
author = {David Traum and Anton Leuski and Antonio Roque and Sudeep Gandhe and David DeVault and Jillian Gerten and Susan Robinson and Bilyana Martinovski},
url = {http://ict.usc.edu/pubs/Natural%20Language%20Dialogue%20Architectures.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper we contrast three architectures for natural language questioning characters. We contrast the relative costs and benefits of each approach in building characters for tactical questioning. The first architecture works purely at the textual level, using cross-language information retrieval techniques to learn the best output for any input from a training set of linked questions and answers. The second architecture adds a global emotional model and computes a compliance model, which can result in different outputs for different levels, given the same inputs. The third architecture works at a semantic level and allows authoring of different policies for response for different kinds of information. We describe these architectures and their strengths and weaknesses with respect to expressive capacity, performance, and authoring demands.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Parsons, Thomas D.; Pair, Jarrell; McLay, Robert N.; Johnston, Scott; Perlman, Karen; Deal, Robert; Reger, Greg; Gahm, Greg; Roy, Michael; Shilling, Russell; Rothbaum, Barbara O.; Graap, Ken; Spitalnick, Josh; Bordnick, Patrick; Difede, JoAnn
Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{rizzo_clinical_2008,
title = {Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD},
author = {Albert Rizzo and Thomas D. Parsons and Jarrell Pair and Robert N. McLay and Scott Johnston and Karen Perlman and Robert Deal and Greg Reger and Greg Gahm and Michael Roy and Russell Shilling and Barbara O. Rothbaum and Ken Graap and Josh Spitalnick and Patrick Bordnick and JoAnn Difede},
url = {http://ict.usc.edu/pubs/Clinical%20Results%20from%20the%20Virtual%20Iraq%20Esposure%20Therapy%20Application%20for%20PTSD.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 5 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. The current paper will present the rationale and description of a VR PTSD therapy application (Virtual Iraq) and present initial findings from its use with active duty service members. Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Results from an open clinical trial using Virtual Iraq at the Naval Medical Center-San Diego with 20 treatment completers indicate that 16 no longer met PTSD diagnostic criteria at post-treatment, with only one not maintaining treatment gains at 3 month follow-up.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Hawkins, Tim; Peers, Pieter; Frederiksen, Sune; Debevec, Paul
Practical Modeling and Acquisition of Layered Facial Reflectance Journal Article
In: ACM Transactions on Graphics, vol. 27, no. 5, 2008.
@article{ghosh_practical_2008,
title = {Practical Modeling and Acquisition of Layered Facial Reflectance},
author = {Abhijeet Ghosh and Tim Hawkins and Pieter Peers and Sune Frederiksen and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Modeling%20and%20Acquisition%20of%20Layered%20Facial%20Reflectance.pdf},
year = {2008},
date = {2008-12-01},
journal = {ACM Transactions on Graphics},
volume = {27},
number = {5},
abstract = {We present a practical method for modeling layered facial reflectance consisting of specular reflectance, single scattering, and shallow and deep subsurface scattering. We estimate parameters of appropriate reflectance models for each of these layers from just 20 photographs recorded in a few seconds from a single viewpoint. We extract spatially-varying specular reflectance and single-scattering parameters from polarization-difference images under spherical and point source illumination. Next, we employ direct-indirect separation to decompose the remaining multiple scattering observed under cross-polarization into shallow and deep scattering components to model the light transport through multiple layers of skin. Finally, we match appropriate diffusion models to the extracted shallow and deep scattering components for different regions on the face. We validate our technique by comparing renderings of subjects to reference photographs recorded from novel viewpoints and under novel illumination conditions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
McAlinden, Ryan; Bosack, Matthew; Macha, Adrian; Vargas, Esau; Walker, Tim; Mann, John; Cruz, Julio
Towards an Automated Pipeline for the Translation and Optimization of Geospatial Data for Virtual Environments Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{mcalinden_towards_2008,
title = {Towards an Automated Pipeline for the Translation and Optimization of Geospatial Data for Virtual Environments},
author = {Ryan McAlinden and Matthew Bosack and Adrian Macha and Esau Vargas and Tim Walker and John Mann and Julio Cruz},
url = {http://ict.usc.edu/pubs/Towards%20an%20Automated%20Pipeline%20for%20the%20Translation%20and%20Optimization%20of%20Geospatial%20Data%20for%20Virtual%20Environments.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {The infusion of commercial game technology into U.S. Army training, simulation, and instructional domains has resulted in more immersive and engaging experiences for Soldiers to hone their skills. However, the influx of such technology comes at a significant cost, specifically in the creation of virtual environments in which these skills are simulated and practiced. Today's typical commercial triple-A game title costs upwards of $40-$60M and takes four to six years to develop, much of which is spent on producing the digital assets used to populate the scene (models, animations, etc.). Additionally, this content is often suited for a custom type of rendering technology, and often cannot be reused without significant manual modification. Unfortunately, the Army has neither the financial nor the personnel resources available to create such highly immersive, reusable virtual content, nor the time to invest when current operations call for training or simulation data in a matter of hours, not months or years. In this paper, we discuss a research initiative aimed at significantly reducing the time and cost for converting, optimizing, and enhancing existing geospatial data for today's virtual environments. The goal is a completely automated process for ingesting existing military terrain data and outputting a technology-agnostic representation in less than 24 hours.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Treskunov, Anton; Sherstyuk, Andrei; Wang, Kin Lik; Pair, Jarrell
Real Binoculars with Virtual Functions for Mixed Environments Proceedings Article
In: International Conference on Advances in Computer Entertainment Technology 2008, Yokohama, Japan, 2008.
@inproceedings{treskunov_real_2008,
title = {Real Binoculars with Virtual Functions for Mixed Environments},
author = {Anton Treskunov and Andrei Sherstyuk and Kin Lik Wang and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Real%20Binoculars%20with%20Virtual%20Functions%20for%20Mixed%20Environments.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {International Conference on Advances in Computer Entertainment Technology 2008},
address = {Yokohama, Japan},
abstract = {Though often desirable, the integration of real and virtual elements in mixed reality environments can be difficult. We propose a number of techniques to facilitate scene exploration and object selection by giving users real instruments as props while implementing their functionality in a virtual part of the environment. Specifically, we present a family of tools built upon the idea of using real binoculars for viewing virtual content. This approach matches user expectations with the tool's capabilities, enhancing the sense of presence and increasing the depth of interaction between the real and virtual components of the scene. We also discuss possible applications of these tools and the results of our user study. This paper is an extended version of earlier work presented at the 4th International Workshop on the Tangible Space Initiative [5].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mower, Emily; Mataric, Maja J.; Narayanan, Shrikanth
Selection of Emotionally Salient Audio-Visual Features for Modeling Human Evaluations of Synthetic Character Emotion Displays Proceedings Article
In: Proceedings of the IEEE International Symposium on Multimedia, Berkeley, CA, 2008.
@inproceedings{mower_selection_2008,
title = {Selection of Emotionally Salient Audio-Visual Features for Modeling Human Evaluations of Synthetic Character Emotion Displays},
author = {Emily Mower and Maja J. Mataric and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Selection%20of%20Emotionally%20Salient%20Audio-Visual%20Features%20for%20Modeling%20Human%20Evaluations%20of%20Synthetic%20Character%20Emotion%20Displays.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the IEEE International Symposium on Multimedia},
address = {Berkeley, CA},
abstract = {Computer simulated avatars and humanoid robots have an increasingly prominent place in today's world. Acceptance of these synthetic characters depends on their ability to properly and recognizably convey basic emotion states to a user population. This study presents an analysis of audio-visual features that can be used to predict user evaluations of synthetic character emotion displays. These features include prosodic, spectral, and semantic properties of audio signals in addition to FACS-inspired video features [11]. The goal of this paper is to identify the audio-visual features that explain the variance in the emotional evaluations of naïve listeners through the utilization of information gain feature selection in conjunction with support vector machines. These results suggest that there exists an emotionally salient subset of the audio-visual feature space. The features that contribute most to the explanation of evaluator variance are the prior knowledge audio statistics (e.g., average valence rating), the high energy band spectral components, and the quartile pitch range. This feature subset should be correctly modeled and implemented in the design of synthetic expressive displays to convey the desired emotions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
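The method named in the abstract above, information gain feature selection in conjunction with support vector machines, can be sketched briefly. This is not the study's pipeline: a synthetic feature matrix stands in for the prosodic, spectral, semantic, and FACS-inspired features, and mutual information (information gain with the emotion label) ranks the features before an SVM is trained on the top subset.

import numpy as np
from sklearn.feature_selection import mutual_info_classif
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 40))                 # 300 clips, 40 candidate features
y = rng.integers(0, 4, size=300)               # 4 emotion labels
X[:, :5] += y[:, None]                         # make the first 5 features informative

gain = mutual_info_classif(X, y, random_state=0)
top = np.argsort(gain)[::-1][:10]              # keep the 10 highest-gain features
score = cross_val_score(SVC(), X[:, top], y, cv=5).mean()
print("selected features:", top, f"CV accuracy: {score:.2f}")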
Swanson, Reid; Gordon, Andrew S.
Say Anything: A Massively Collaborative Open Domain Story Writing Companion Proceedings Article
In: First International Conference on Interactive Digital Storytelling, Erfurt, Germany, 2008.
@inproceedings{swanson_say_2008,
title = {Say Anything: A Massively Collaborative Open Domain Story Writing Companion},
author = {Reid Swanson and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Say%20Anything-%20A%20Massively%20collaborative%20Open%20Domain%20Story%20Writing%20Companion.pdf},
year = {2008},
date = {2008-11-01},
booktitle = {First International Conference on Interactive Digital Storytelling},
address = {Erfurt, Germany},
abstract = {Interactive storytelling is an interesting cross-disciplinary area that has importance in research as well as entertainment. In this paper we explore a new area of interactive storytelling that blurs the line between traditional interactive fiction and collaborative writing. We present a system where the user and computer take turns in writing sentences of a fictional narrative. Sentences contributed by the computer are selected from a collection of millions of stories extracted from Internet weblogs. By leveraging the large amounts of personal narrative content available on the web, we show that even with a simple approach our system can produce compelling stories with our users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Han, Kyu J.; Georgiou, Panayiotis G.; Narayanan, Shrikanth
The SAIL Speaker Diarization System for Analysis of Spontaneous Meetings Proceedings Article
In: Proceedings of IEEE International Workshop on Multimedia Signal Processing (MMSP), Cairns, Australia, 2008.
@inproceedings{han_sail_2008,
title = {The SAIL Speaker Diarization System for Analysis of Spontaneous Meetings},
author = {Kyu J. Han and Panayiotis G. Georgiou and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/The%20SAIL%20Speaker%20Diarization%20System%20for%20Analysis%20of%20Spontaneous%20Meetings.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of IEEE International Workshop on Multimedia Signal Processing (MMSP)},
address = {Cairns, Australia},
abstract = {In this paper, we propose a novel approach to speaker diarization of spontaneous meetings in our own multimodal SmartRoom environment. The proposed speaker diarization system first applies a sequential clustering concept to segmentation of a given audio data source, and then performs agglomerative hierarchical clustering for speaker-specific classification (or speaker clustering) of speech segments. The speaker clustering algorithm utilizes an incremental Gaussian mixture cluster modeling strategy, and a stopping point estimation method based on information change rate. Through experiments on various meeting conversation data of approximately 200 minutes total length, this system is demonstrated to provide a diarization error rate of 18.90% on average.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
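The second stage described in the abstract above, agglomerative hierarchical clustering of speech segments, can be illustrated in miniature. The sketch below is a deliberately simplified stand-in, not the SAIL system: segments are synthetic MFCC-like matrices, clusters are compared by the distance between their mean vectors, and a fixed distance threshold stands in for the paper's incremental GMM cluster models and information change rate stopping criterion.

import numpy as np

def merge_closest(clusters, stop_dist=2.0):
    """Bottom-up pass: repeatedly merge the two closest clusters until the stop rule fires."""
    while len(clusters) > 1:
        means = [np.vstack(c).mean(axis=0) for c in clusters]
        dists = {(i, j): np.linalg.norm(means[i] - means[j])
                 for i in range(len(means)) for j in range(i + 1, len(means))}
        (i, j), d = min(dists.items(), key=lambda kv: kv[1])
        if d > stop_dist:                      # crude stand-in for the ICR stop rule
            break
        clusters[i] = clusters[i] + clusters[j]
        del clusters[j]
    return clusters

rng = np.random.default_rng(0)
# 6 segments from 2 true speakers: 100 frames x 13 dims each.
segments = [rng.normal(spk * 3.0, 1.0, size=(100, 13)) for spk in (0, 1, 0, 1, 1, 0)]
speakers = merge_closest([[s] for s in segments])
print(f"estimated number of speakers: {len(speakers)}")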
Pataki, Caroly; Sugar, Jeff; Kenny, Patrick G.; Parsons, Thomas D.; Rizzo, Albert; Pato, Michele; George, Cheryl St.
A Virtual Adolescent Patient with PTSD for Training Psychiatrists Proceedings Article
In: Proceedings of the 55th Annual Meeting of the American Academy of Child Adolescent Psychiatry, Chicago, IL, 2008.
@inproceedings{pataki_virtual_2008,
title = {A Virtual Adolescent Patient with PTSD for Training Psychiatrists},
author = {Caroly Pataki and Jeff Sugar and Patrick G. Kenny and Thomas D. Parsons and Albert Rizzo and Michele Pato and Cheryl St. George},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Adolescent%20Patient%20with%20PTSD%20for%20Training%20Psychiatrists.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the 55th Annual Meeting of the American Academy of Child Adolescent Psychiatry},
address = {Chicago, IL},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Pearce, Celia
Uses of Digital Enchantment: Computer Games as the New Fairy Tales Proceedings Article
In: Proceedings of the Vienna Games Conference 2008: The Future of Reality and Gaming (FROG), Vienna, Austria, 2008.
@inproceedings{morie_uses_2008,
title = {Uses of Digital Enchantment: Computer Games as the New Fairy Tales},
author = {Jacquelyn Morie and Celia Pearce},
url = {http://ict.usc.edu/pubs/The_uses_of_digital_enchantment.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the Vienna Games Conference 2008: The Future of Reality and Gaming (FROG)},
address = {Vienna, Austria},
abstract = {In this paper we argue that digital games have come to fill the cultural niche traditionally occupied by fairytales, and that they are ideally suited to realize some of the unique characteristics of this genre of folklore and literature. Arguably one of the most influential authors on game narrative and genre, J.R.R. Tolkien wrote extensively about fairytales, authored fairytales and considered his great epic work of high fantasy, "The Trilogy of the Ring," to be a fairy tale of sorts. He argued that fairytales were not about fairies per se but took place in the "realm of faerie," the magical world that fairies inhabit. "The realm of fairy-story is wide and deep and high and filled with many things: all manner of beasts and birds are found there; shoreless seas and stars uncounted; beauty that is an enchantment, and ever-present peril; both joy and sorrow as sharp as swords." [1] The "realm of faerie" provides a context for archetypal characters and narratives that express the inner life of the child and the process of transitioning to adulthood, a universal theme which has equal resonance with adults. In The Uses of Enchantment, controversial psychologist Bruno Bettelheim argues that "The motifs of fairy tales are experienced as wondrous because the child feels understood and appreciated deep down in his feelings, hopes, and anxieties, without these all having to be dragged up and investigated in the harsh light of a rationality that is still beyond him." [2] "...the internal processes are externalized and become comprehensible as represented by the figures of the story and its events." [3] These externalized processes can be seen in a wide range of digital games that put the player in the role of fairytale heroine, or more often, hero. Single-player adventure-style games such as the Zelda and Final Fantasy series, Ico, Shadow of the Colossus, Beyond Good and Evil, Okami and the Longest Journey series bring the unique affordances of the computer as a purveyor of magic to bear on this classic literary genre. Science fiction author Arthur C. Clarke famously asserted that "Any sufficiently advanced technology is indistinguishable from magic." [4] Frederick Brooks, in The Mythical Man-Month [5], brings another level of refinement to this by describing the alchemic conjuring qualities of the computer thusly: "One types the correct incantation on a keyboard and a display screen comes to life, showing things that never were nor could be." Indeed even the nomenclature of MUDs, in which programmers are referred to as "wizards," seems to confer this quality of magical enchantment to the very creators of games themselves. Given its propensity for magic, the computer is particularly well-suited as a means of expression for the fairytale genre, shifting the focus from empathy with a central character engaged in an epic journey, to endowing a player with the agency to fulfill his or her destiny. We see the trajectory of the "realm of faerie" in the tradition from Tolkien's literary masterworks to the contemporary MMOG. Tolkien's world formed the inspiration for the tabletop role-playing games of the seventies, particularly Dungeons and Dragons, which gave rise to the MUDs of the 1980s and finally the fully realized multiplayer 3D computer fantasy worlds of the 1990s to the present, and the recent release of Lord of the Rings Online.
This instrumentalization of fantasy environments through mathematical constructs provided a vital transition for the fairytale genre from the world of words to the world of numbers, and hence the world of computers. Today, the fantasy worlds of Tolkien, as well as the new fairy tales of game developers, have been rendered in their full glory via the "correct incantation on a keyboard." While it remains to be seen how or if these new digital fairytales will stand the tests of time as their literary counterparts have done, we argue that they fulfill a similar and vital role in providing today's children a sense of ritual and power in their own hero's journey from child to adulthood. References [1] Tolkien, J.R.R. (1966). The Tolkien Reader. New York: Ballantine. [2] Bettelheim, Bruno. (1975). The Uses of Enchantment: The Meaning and Importance of Fairy Tales. New York: Alfred A. Knopf. [3] Ibid. [4] Clarke, Arthur C. (1962). Profiles of the Future: An Inquiry into the Limits of the Possible. New York: Harper & Row. [5] Brooks, Frederick P. (1975). The Mythical Man-Month: Essays on Software Engineering. Reading, MA: Addison-Wesley.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn
The Performance of the Self and Its Effect on Presence in Virtual Worlds Proceedings Article
In: Proceedings of the 11th Annual International Workshop on Presence, pp. 265–269, Padova, Italy, 2008.
@inproceedings{morie_performance_2008,
title = {The Performance of the Self and Its Effect on Presence in Virtual Worlds},
author = {Jacquelyn Morie},
url = {http://ict.usc.edu/pubs/The%20Performance%20of%20the%20Self%20and%20Its%20Effect%20on%20Presence%20in%20Virtual%20Worlds.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the 11th Annual International Workshop on Presence},
pages = {265–269},
address = {Padova, Italy},
abstract = {This paper addresses the many types of roles that people adopt within digital arenas such as online virtual worlds, and how those authored selves can enhance the sense of Self presence. Erving Goffman maintains that we play many roles in our everyday lives and that our identity is constantly being redefined by both aspects of a situation and the other people with whom we interact. With the explosion of online virtual worlds, the possibilities for such performances of self have multiplied. We now have more opportunities to explore aspects of our personalities including those that we might be reluctant to expose in real life situations. This is a new development for virtual reality: participants can create their appearance in online virtual worlds and become extremely connected to it. The potential for these personas to affect and enhance the sense of Presence should be addressed, and both quantitative and qualitative methods developed to measure their effects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; DeVault, David; Roque, Antonio; Martinovski, Bilyana; Artstein, Ron; Leuski, Anton; Gerten, Jillian; Traum, David
From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters Proceedings Article
In: Proceedings of InterSpeech, 2008.
@inproceedings{gandhe_domain_2008,
title = {From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters},
author = {Sudeep Gandhe and David DeVault and Antonio Roque and Bilyana Martinovski and Ron Artstein and Anton Leuski and Jillian Gerten and David Traum},
url = {http://ict.usc.edu/pubs/From%20Domain%20Specification%20to%20Virtual%20Humans.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of InterSpeech},
abstract = {We present a new approach for rapidly developing dialogue capabilities for virtual humans. Starting from a domain specification, an integrated authoring interface automatically generates dialogue acts with all possible contents. These dialogue acts are linked to example utterances in order to provide training data for natural language understanding and generation. The virtual human dialogue system contains a dialogue manager following the information-state approach, using finite-state machines and SCXML to manage local coherence, as well as explicit modeling of emotions and compliance level and a grounding component based on evidence of understanding. Using the authoring tools, we design and implement a version of the virtual human Hassan and compare it to previous architectures for the character.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
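The dialogue manager named in the abstract above combines finite-state machines with an explicit compliance level. The toy fragment below illustrates only that flavor of control flow and is an illustrative assumption throughout: the actual system uses SCXML and information-state updates, and the state names, dialogue acts, and canned replies here are invented.

# Minimal finite-state dialogue sketch: states gate which dialogue acts are
# acceptable, and a compliance level picks between cooperative and evasive replies.
STATES = {
    "start":     {"greet": "open"},
    "open":      {"ask": "answering", "close": "done"},
    "answering": {"ask": "answering", "close": "done"},
}

def respond(state, act, compliance):
    """Advance the FSM one step and pick a canned response by compliance level."""
    if act not in STATES.get(state, {}):
        return state, "I don't understand."    # fallback for unexpected acts
    nxt = STATES[state][act]
    if act == "ask":
        reply = "Here is what I know." if compliance > 0.5 else "I can't tell you that."
    else:
        reply = {"greet": "Hello.", "close": "Goodbye."}[act]
    return nxt, reply

state = "start"
for act in ["greet", "ask", "ask", "close"]:
    state, reply = respond(state, act, compliance=0.3)
    print(f"{act} -> {reply} (state: {state})")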
Gordon, Andrew S.
Story Management Technologies for Organizational Learning Proceedings Article
In: International Conference on Knowledge Management, Special Track on Intelligent Assistance for Self-Directed and Organizational Learning, Graz, Austria, 2008.
@inproceedings{gordon_story_2008,
title = {Story Management Technologies for Organizational Learning},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Story%20Management%20Technologies%20for%20Organizational%20Learning.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {International Conference on Knowledge Management, Special Track on Intelligent Assistance for Self-Directed and Organizational Learning},
address = {Graz, Austria},
abstract = {The stories told among members of an organization are an effective instrument for knowledge socialization, the sharing of experiences through social mechanisms. However, the utility of stories for organizational learning is limited due to the difficulties in acquiring stories that are relevant to the practices of an organization, identifying the learning goals that these stories serve, and delivering these stories to the right people at the right time in a manner that best facilitates learning. In this paper we outline a vision for story-based organizational learning in the future, and describe three areas where intelligent technologies can be applied to automate story management practices in support of organizational learning. First, we describe automated story capture technologies that identify narratives of people's experiences within the context of a larger discourse. Second, we describe automated retrieval technologies that identify stories that are relevant to specific educational needs. Third, we describe how stories can be transformed into effective story-based learning environments with minimal development costs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels Journal Article
In: Lecture Notes in Computer Science, vol. 5208, pp. 484–485, 2008.
@article{de_melo_evolving_2008,
title = {Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20in%20Virtual%20Humans%20Using%20Lights%20and%20Pixels.pdf},
year = {2008},
date = {2008-09-01},
journal = {Lecture Notes in Computer Science},
volume = {5208},
pages = {484–485},
abstract = {Inspired by the arts, this paper addresses the challenge of expressing emotions in virtual humans using the environment's lights and the screen's pixels. An evolutionary approach is proposed which relies on genetic algorithms to learn how to map emotions into these forms of expression. The algorithm evolves populations of hypotheses, where each hypothesis represents a configuration of lighting and screen expression. Hypotheses are evaluated by a critic ensemble composed of artificial and human critics. The need for human critics is motivated by a study which reveals the limitations of an approach that relies only on artificial critics that follow principles from art literature. We also address the need for the model to improve with experience and to adapt to the individual, social and cultural values in the arts. Finally, a second study is described where subjects successfully evolved mappings for joy and sadness.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
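The abstract above describes a genetic algorithm evolving expression configurations under a critic's feedback. The sketch below is a generic GA loop, not the paper's model: a hypothesis is reduced to a hue/brightness/saturation triple, a hand-coded distance-based critic stands in for the ensemble of artificial and human critics, and the "sadness" target values are invented for illustration.

import random

random.seed(0)
TARGET_SADNESS = (0.6, 0.2, 0.3)               # assumed ideal hue, brightness, saturation

def critic(h):
    """Score a lighting hypothesis; higher is better (closer to the target)."""
    return -sum((a - b) ** 2 for a, b in zip(h, TARGET_SADNESS))

def evolve(pop_size=30, generations=40, mut=0.1):
    pop = [tuple(random.random() for _ in range(3)) for _ in range(pop_size)]
    for _ in range(generations):
        pop.sort(key=critic, reverse=True)
        parents = pop[: pop_size // 2]          # truncation selection
        children = []
        while len(children) < pop_size - len(parents):
            a, b = random.sample(parents, 2)
            cut = random.randrange(1, 3)        # one-point crossover
            child = a[:cut] + b[cut:]
            child = tuple(min(1.0, max(0.0, g + random.gauss(0, mut))) for g in child)
            children.append(child)
        pop = parents + children
    return max(pop, key=critic)

best = evolve()
print("evolved lighting (hue, brightness, saturation):", [round(g, 2) for g in best])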
Parsons, Thomas D.; Rizzo, Albert
Virtual Human Patients for Training of Clinical Interview and Communication Skills Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, Maia, Portugal, 2008, ISBN: 07 049 15 00 6.
@inproceedings{parsons_virtual_2008,
title = {Virtual Human Patients for Training of Clinical Interview and Communication Skills},
author = {Thomas D. Parsons and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Human%20Patients%20for%20Training%20of%20Clinical%20Interview%20and%20Communication%20Skills.pdf},
isbn = {07 049 15 00 6},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology},
address = {Maia, Portugal},
abstract = {Although schools commonly make use of standardized patients to teach interview skills, the diversity of the scenarios standardized patients can characterize is limited by the availability of human actors. Virtual Human Agent technology has evolved to a point where researchers may begin developing mental health applications that make use of virtual reality patients. The work presented here is a preliminary attempt at what we believe to be a large application area. Herein we describe an ongoing study of our virtual patients. We present an approach that allows novice mental health clinicians to conduct an interview with a virtual character that emulates 1) an adolescent male with conduct disorder; and 2) an adolescent female who has recently been physically traumatized.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Envisioning With Weblogs Proceedings Article
In: International Conference on New Media Technology, Special Track on Knowledge Acquisition From the Social Web, Graz, Austria, 2008.
@inproceedings{gordon_envisioning_2008,
title = {Envisioning With Weblogs},
author = {Andrew S. Gordon and Reid Swanson},
url = {http://ict.usc.edu/pubs/Envisioning%20With%20Weblogs.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {International Conference on New Media Technology, Special Track on Knowledge Acquisition From the Social Web},
address = {Graz, Austria},
abstract = {In this position paper we present a vision of how the stories that people tell in Internet weblogs can be used directly for automated commonsense reasoning, specifically to support the core envisionment functions of event prediction, explanation, and imagination.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}