Publications
Search
Gordon, Andrew S.; Ganesan, Kavita
Automated Story Capture From Conversational Speech Proceedings Article
In: 3rd International Conference on Knowledge Capture (K-CAP 05), Banff, Alberta, Canada, 2005.
@inproceedings{gordon_automated_2005,
  title     = {Automated Story Capture From Conversational Speech},
  author    = {Andrew S. Gordon and Kavita Ganesan},
  url       = {http://ict.usc.edu/pubs/Automated%20Story%20Capture%20From%20Conversational%20Speech.pdf},
  year      = {2005},
  date      = {2005-10-01},
  booktitle = {3rd International Conference on Knowledge Capture (K-CAP 05)},
  address   = {Banff, Alberta, Canada},
  abstract  = {While storytelling has long been recognized as an important part of effective knowledge management in organizations, knowledge management technologies have generally not distinguished between stories and other types of discourse. In this paper we describe a new type of technological support for storytelling that involves automatically capturing the stories that people tell to each other in conversations. We describe our first attempt at constructing an automated story extraction system using statistical text classification and a simple voting scheme. We evaluate the performance of this system and demonstrate that useful levels of precision and recall can be obtained when analyzing transcripts of interviews, but that performance on speech recognition data is not above what can be expected by chance. This paper establishes the level of performance that can be obtained using a straightforward approach to story extraction, and outlines ways in which future systems can improve on these results and enable a wide range of knowledge socialization applications.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.; Kenny, Patrick G.; Hovy, Eduard; Narayanan, Shrikanth; Fast, Edward; Martinovski, Bilyana; Baghat, Rahul; Robinson, Susan; Marshall, Andrew; Wang, Dagen; Gandhe, Sudeep; Leuski, Anton
Dealing with Doctors: A Virtual Human for Non-team Interaction Proceedings Article
In: 6th SIGdial Conference on Discourse and Dialogue, Lisbon, Portugal, 2005.
@inproceedings{traum_dealing_2005,
  title     = {Dealing with Doctors: A Virtual Human for Non-team Interaction},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella and Patrick G. Kenny and Eduard Hovy and Shrikanth Narayanan and Edward Fast and Bilyana Martinovski and Rahul Baghat and Susan Robinson and Andrew Marshall and Dagen Wang and Sudeep Gandhe and Anton Leuski},
  url       = {http://ict.usc.edu/pubs/Dealing%20with%20Doctors.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {6th SIGdial Conference on Discourse and Dialogue},
  address   = {Lisbon, Portugal},
  abstract  = {We present a virtual human doctor who can engage in multi-modal negotiation dialogue with people from other organizations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan
Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
@inproceedings{traum_fight_2005,
  title     = {Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis},
  author    = {David Traum and William Swartout and Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Fight,%20Flight,%20or%20Negotiate-%20Believable%20Strategies%20for%20Conversing%20under%20Crisis.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {This paper describes a model of conversation strategies implemented in virtual humans designed to help people learn negotiation skills. We motivate and discuss these strategies and their use to allow a virtual human to engage in complex adversarial negotiation with a human trainee. Choice of strategy depends on both the personality of the agent and assessment of the likelihood that the negotiation can be beneficial. Execution of strategies can be performed by choosing specific dialogue behaviors such as whether and how to respond to a proposal. Current assessment of the value of the topic, the utility of the strategy, and affiliation toward the other conversants can be used to dynamically change strategies throughout the course of a conversation. Examples will be given from the SASO-ST project, in which a trainee learns to negotiate by interacting with virtual humans who employ these strategies.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morie, Jacquelyn; Iyer, Kumar; Luigi, Donat-Pierre; Williams, Josh; Dozois, Aimee; Rizzo, Albert
Development of a Data Management Tool for Investigating Multivariate Space and Free Will Experiences Journal Article
In: Applied Psychophysiology and Biofeedback, vol. 30, no. 3, pp. 319–331, 2005.
@article{morie_development_2005,
  title     = {Development of a Data Management Tool for Investigating Multivariate Space and Free Will Experiences},
  author    = {Jacquelyn Morie and Kumar Iyer and Donat-Pierre Luigi and Josh Williams and Aimee Dozois and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20Data%20Management%20Tool%20for%20Investigating%20Multivariate%20Space%20and%20Free%20Will%20Experiences%20in%20Virtual%20Reality.pdf},
  year      = {2005},
  date      = {2005-09-01},
  journal   = {Applied Psychophysiology and Biofeedback},
  volume    = {30},
  number    = {3},
  pages     = {319--331},
  abstract  = {While achieving realism has been a main goal in making convincing virtual reality (VR) environments, just what constitutes realism is still a question situated firmly in the research domain. VR has become mature enough to be used in therapeutic applications such as clinical exposure therapy with some success. We now need detailed scientific investigations to better understand why VR works for these types of cases, and how it could work for other key applications such as training. Just as in real life, it appears that the factors will be complex and multi-variate, and this plethoric situation presents exceptional challenges to the VR researcher. We would not want to lessen VR's ability to replicate real world conditions in order to more easily study it, however, for by doing so we may compromise the very qualities that comprise its effectiveness. What is really needed are more robust tools to instrument, organize, and visualize the complex data generated by measurements of participant experiences in a realistic virtual world. We describe here our first study in an ongoing program of effective virtual environment research, the types of data we are dealing with, and a specific tool we have been compelled to create that allows us some measure of control over this data. We call this tool Phloem, after the botanical channels that plants use to transport, support and store nutrients.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Debevec, Paul
Capturing and Simulating Physically Accurate Illumination in Computer Graphics Proceedings Article
In: 11th Annual Symposium on Frontiers of Engineering, Niskayuna, NY, 2005.
@inproceedings{debevec_capturing_2005,
  title     = {Capturing and Simulating Physically Accurate Illumination in Computer Graphics},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Simulating%20Physically%20Accurate%20Illumination%20in%20Computer%20Graphics.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {11th Annual Symposium on Frontiers of Engineering},
  address   = {Niskayuna, NY},
  abstract  = {Anyone who has seen a recent summer blockbuster has witnessed the dramatic increases in computer-generated realism in recent years. Visual effects supervisors now report that bringing even the most challenging visions of film directors to the screen is no longer a question of what's possible; with today's techniques it is only a matter of time and cost. Driving this increase in realism have been computer graphics (CG) techniques for simulating how light travels within a scene and for simulating how light reflects off of and through surfaces. These techniques---some developed recently, and some originating in the 1980s---are being applied to the visual effects process by computer graphics artists who have found ways to channel the power of these new tools.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kallman, Marcelo; Marsella, Stacy C.
Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans Proceedings Article
In: International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
@inproceedings{kallman_hierarchical_2005,
  title     = {Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans},
  author    = {Marcelo Kallman and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Hierarchical%20Motion%20Controllers%20for%20Real-Time%20Autonomous%20Virtual%20Humans.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {Continuous and synchronized whole-body motions are essential for achieving believable autonomous virtual humans in interactive applications. We present a new motion control architecture based on generic controllers that can be hierarchically interconnected and reused in real-time. The hierarchical organization implies that leaf controllers are motion generators while the other nodes are connectors, performing operations such as interpolation, blending, and precise scheduling of children controllers. We also describe how the system can correctly handle the synchronization of gestures with speech in order to achieve believable conversational characters. For that purpose, different types of controllers implement a generic model of the different phases of a gesture.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kwon, Soon-il; Narayanan, Shrikanth
Unsupervised Speaker Indexing Using Generic Models Journal Article
In: IEEE Transactions on Speech and Audio Processing, vol. 13, no. 5, pp. 1004–1013, 2005.
@article{kwon_unsupervised_2005,
  title     = {Unsupervised Speaker Indexing Using Generic Models},
  author    = {Soon-il Kwon and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Unsupervised%20Speaker%20Indexing%20Using%20Generic%20Models.pdf},
  year      = {2005},
  date      = {2005-09-01},
  journal   = {IEEE Transactions on Speech and Audio Processing},
  volume    = {13},
  number    = {5},
  pages     = {1004--1013},
  abstract  = {Unsupervised speaker indexing sequentially detects points where a speaker identity changes in a multispeaker audio stream, and categorizes each speaker segment, without any prior knowledge about the speakers. This paper addresses two challenges: The first relates to sequential speaker change detection. The second relates to speaker modeling in light of the fact that the number/identity of the speakers is unknown. To address this issue, a predetermined generic speaker-independent model set, called the sample speaker models (SSM), is proposed. This set can be useful for more accurate speaker modeling and clustering without requiring training models on target speaker data. Once a speaker-independent model is selected from the generic sample models, it is progressively adapted into a specific speaker-dependent model. Experiments were performed with data from the Speaker Recognition Benchmark NIST Speech corpus (1999) and the HUB-4 Broadcast News Evaluation English Test material (1999). Results showed that our new technique, sampled using the Markov Chain Monte Carlo method, gave 92.5% indexing accuracy on two speaker telephone conversations, 89.6% on four-speaker conversations with the telephone speech quality, and 87.2% on broadcast news. The SSMs outperformed the universal background model by up to 29.4% and the universal gender models by up to 22.5% in indexing accuracy in the experiments of this paper.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Proceedings Article
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
@inproceedings{jones_performance_2005,
  title     = {Performance Geometry Capture for Spatially Varying Relighting},
  author    = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
  year      = {2005},
  date      = {2005-08-01},
  booktitle = {SIGGRAPH 2005 Sketch},
  address   = {Los Angeles, CA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Sanjit; Chu, Anson; Cohen, Jonathan; Pighin, Frédéric
Fluid Simulation Via Disjoint Translating Grids Proceedings Article
In: Special Interest Group - Graphics Technical Sketch, Los Angeles, CA, 2005.
@inproceedings{patel_fluid_2005,
  title     = {Fluid Simulation Via Disjoint Translating Grids},
  author    = {Sanjit Patel and Anson Chu and Jonathan Cohen and Fr{\'e}d{\'e}ric Pighin},
  url       = {http://ict.usc.edu/pubs/Fluid%20Simulation%20Via%20Disjoint%20Translating%20Grids.pdf},
  year      = {2005},
  date      = {2005-08-01},
  booktitle = {Special Interest Group - Graphics Technical Sketch},
  address   = {Los Angeles, CA},
  abstract  = {We present an adaptive fluid simulation technique that splits the computation domain in multiple moving grids. Using this technique, we are able to simulate fluids over large spatial domains with reasonable computation times.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
A Median Cut Algorithm for Light Probe Sampling Proceedings Article
In: SIGGRAPH (Special Interest Group - Graphics), Los Angeles, CA, 2005.
@inproceedings{debevec_median_2005,
  title     = {A Median Cut Algorithm for Light Probe Sampling},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20Median%20Cut%20Algorithm%20for%20Light%20Probe%20Sampling.pdf},
  year      = {2005},
  date      = {2005-08-01},
  booktitle = {SIGGRAPH (Special Interest Group - Graphics)},
  address   = {Los Angeles, CA},
  abstract  = {We present a technique for approximating a light probe image as a constellation of light sources based on a median cut algorithm. The algorithm is efficient, simple to implement, and can realistically represent a complex lighting environment with as few as 64 point light sources.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh; Dozois, Aimee; Luigi, Donat-Pierre
The Fidelity of "Feel": Emotional Affordance in Virtual Environments Proceedings Article
In: 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
@inproceedings{morie_fidelity_2005,
  title     = {The Fidelity of ``Feel'': Emotional Affordance in Virtual Environments},
  author    = {Jacquelyn Morie and Josh Williams and Aimee Dozois and Donat-Pierre Luigi},
  url       = {http://ict.usc.edu/pubs/The%20Fidelity%20of%20Feel-%20Emotional%20Affordance%20in%20Virtual%20Environments.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {11th International Conference on Human-Computer Interaction},
  address   = {Las Vegas, NV},
  abstract  = {Virtual environments (VEs) should be able to provide experiences as rich and complex as those to be had in real life. While this seems obvious, it is not yet possible to create a perfect simulacrum of the real world, so such correspondence requires the development of design techniques by which VEs can be made to appear more real. It also requires evaluation studies to determine if such techniques produce the desired results. As emotions are implicated in our phenomenological understanding of the physical world, they should also play an integral role in the experience of the virtual one. Therefore, a logical sequence of experimentation to understand how VEs can be made to function as emotion-induction systems is in order. The Sensory Environments Evaluation (SEE) research program has developed a twofold design process to explore if we react to virtually supplied stimuli as we do to the real world equivalents. We look at manipulating both the sensory and emotional aspects of not only the environment but also the participant. We do this with the focus on what emotional affordances this manipulation will provide. Our first evaluation scenario, DarkCon, was designed in this way to produce a strong sense of presence. Sixty-four subjects have been fielded to date and the data is currently being analyzed for results. We hope to find that rich design techniques along with the frame of mind with which a VR experience is approached will predictably influence perception and behavior within a virtual world. We will use these results to inform continuing research into the creation of more emotionally affective VEs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark; Lent, Michael; Solomon, Steve; Gomboc, Dave
Explainable Artificial Intelligence for Training and Tutoring Proceedings Article
In: 12th International Conference on Artificial Intelligence in Education, Amsterdam, The Netherlands, 2005.
@inproceedings{lane_explainable_2005,
  title     = {Explainable Artificial Intelligence for Training and Tutoring},
  author    = {H. Chad Lane and Mark Core and Michael Lent and Steve Solomon and Dave Gomboc},
  url       = {http://ict.usc.edu/pubs/Explainable%20Artificial%20Intelligence%20for%20Training%20and%20Tutoring.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {12th International Conference on Artificial Intelligence in Education},
  address   = {Amsterdam, The Netherlands},
  abstract  = {This paper describes an Explainable Artificial Intelligence (XAI) tool that allows entities to answer questions about their activities within a tactical simulation. We show how XAI can be used to provide more meaningful after-action reviews and discuss ongoing work to integrate an intelligent tutor into the XAI framework.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
@inproceedings{traum_virtual_2005,
  title     = {Virtual Humans for non-team interaction training},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
  address   = {Utrecht, Netherlands},
  abstract  = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
@article{gratch_evaluating_2005,
  title     = {Evaluating a computational model of emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
  year      = {2005},
  date      = {2005-07-01},
  journal   = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
  volume    = {11},
  number    = {1},
  pages     = {23--43},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Riedl, Mark O.; Lane, H. Chad; Hill, Randall W.; Swartout, William
Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture Proceedings Article
In: AI and Education 2005 Workshop on Narrative Learning Environments, Amsterdam, The Netherlands, 2005.
@inproceedings{riedl_automated_2005,
  title     = {Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture},
  author    = {Mark O. Riedl and H. Chad Lane and Randall W. Hill and William Swartout},
  url       = {http://ict.usc.edu/pubs/Automated%20Story%20Direction%20and%20Intelligent%20Tutoring-%20Towards%20a%20Unifying%20Architecture.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {AI and Education 2005 Workshop on Narrative Learning Environments},
  address   = {Amsterdam, The Netherlands},
  abstract  = {Recently, interactive storytelling systems---systems that allow a user to make decisions that can potentially impact the direction of a narrative---have been applied to training and education. Interactive storytelling systems often rely on an automated story director to manage the user's experience. The focus of an automated director is the emergence of a narrative-like experience for the user. In contrast, intelligent tutors traditionally address the acquisition or strengthening of a learner's knowledge. Our goal is to build training simulations that cultivate compelling storylines while simultaneously maintaining a pedagogical presence by incorporating both automated story direction and intelligent tutoring into an immersive environment. But what is the relationship between an automated director and an intelligent tutor? In this paper, we discuss the similarities and differences of automated story directors and intelligent tutors and, based on our analysis, recommend an architecture for building narrative-based training simulations that utilize both effectively and without conflict.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Kim, Gerard J.; Yeh, Shih-Ching; Thiebaux, Marcus; Hwang, Jayne; Buckwalter, John Galen
Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
@inproceedings{rizzo_development_2005,
  title     = {Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods},
  author    = {Albert Rizzo and Gerard J. Kim and Shih-Ching Yeh and Marcus Thiebaux and Jayne Hwang and John Galen Buckwalter},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20Benchmarking%20Scenario%20for%20Testing%203D%20User%20Interface%20Devices%20and%20Interaction%20Methods.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
  address   = {Las Vegas, NV},
  abstract  = {To address a part of the challenge of testing and comparing various 3D user interface devices and methods, we are currently developing and testing a VR 3D User Interface benchmarking scenario. The approach outlined in this paper focuses on the capture of human interaction performance on object selection and manipulation tasks using standardized and scalable block configurations that allow for measurement of speed and efficiency with any interaction device or method. The block configurations that we are using as benchmarking stimuli are accompanied by a pure mental rotation visuospatial assessment test. This feature will allow researchers to test users' existing spatial abilities and statistically parcel out the variability due to innate ability, from the actual hands-on performance metrics. This statistical approach could lead to a more pure analysis of the ergonomic features of interaction devices and methods separate from existing user abilities. An initial test was conducted at two sites using this benchmarking system to make comparisons between 3D/gesture-based and 2D/mouse-based interactions for 3D selection and manipulation. Our preliminary results demonstrated, as expected, that the 3D/gesture based method in general outperformed the 2D/mouse interface. As well there were statistically significant performance differences between different user groups when categorized by their sex, visuospatial ability and educational background.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Commonsense Psychology and the Functional Requirements of Cognitive Models Proceedings Article
In: American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence, AAAI Press, Pittsburgh, PA, 2005.
@inproceedings{gordon_commonsense_2005,
  title     = {Commonsense Psychology and the Functional Requirements of Cognitive Models},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Commonsense%20Psychology%20and%20the%20Functional%20Requirements%20of%20Cognitive%20Models.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence},
  publisher = {AAAI Press},
  address   = {Pittsburgh, PA},
  abstract  = {In this paper we argue that previous models of cognitive abilities (e.g. memory, analogy) have been constructed to satisfy functional requirements of implicit commonsense psychological theories held by researchers and nonresearchers alike. Rather than working to avoid the influence of commonsense psychology in cognitive modeling research, we propose to capitalize on progress in developing formal theories of commonsense psychology to explicitly define the functional requirements of cognitive models. We present a taxonomy of 16 classes of cognitive models that correspond to the representational areas that have been addressed in large-scale inferential theories of commonsense psychology. We consider the functional requirements that can be derived from inferential theories for one of these classes, the processes involved in human memory. We argue that the breadth coverage of commonsense theories can be used to better evaluate the explanatory scope of cognitive models, as well as facilitate the investigation of larger-scale cognitive systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
@inproceedings{ettaile_transonics_2005,
  title     = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
  author    = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
  pages     = {89--92},
  address   = {Ann Arbor, MI},
  abstract  = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctor-patient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liao, Wei-Kai; Cohen, Isaac
Classifying Facial Gestures in Presence of Head Motion Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
@inproceedings{liao_classifying_2005,
  title     = {Classifying Facial Gestures in Presence of Head Motion},
  author    = {Wei-Kai Liao and Isaac Cohen},
  url       = {http://ict.usc.edu/pubs/Classifying%20Facial%20Gestures%20in%20Presence%20of%20Head%20Motion.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
  address   = {San Diego, CA},
  abstract  = {This paper addresses the problem of automatic facial gestures recognition in an interactive environment. Automatic facial gestures recognition is a difficult problem in computer vision, and most of the work has focused on inferring facial gestures in the context of a static head. In the paper we address the challenging problem of recognizing the facial expressions of a moving head. We present a systematic framework to analyze and classify the facial gestures with the head movement. Our system includes a 3D head pose estimation method to recover the global head motion. After estimating the head pose, the human face is modeled by a collection of face's regions. These regions represent the face model used for locating and extracting temporal facial features. We propose using a locally affine motion model to represent extracted motion fields. The classification consists of a graphical model for robustly representing the dependencies of the selected facial regions and the support vector machine. Our experiments show that this approach could classify human expressions in interactive environments accurately.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chu, Chi-Wei; Cohen, Isaac
Posture and Gesture Recognition using 3D Body Shapes Decomposition Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
@inproceedings{chu_posture_2005,
  title     = {Posture and Gesture Recognition using 3D Body Shapes Decomposition},
  author    = {Chi-Wei Chu and Isaac Cohen},
  url       = {http://ict.usc.edu/pubs/Posture%20and%20Gesture%20Recognition%20using%203D%20Body%20Shapes%20Decomposition.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
  address   = {San Diego, CA},
  abstract  = {We present a method for describing arbitrary human posture as a combination of basic postures. This decomposition allows for recognition of a larger number of postures and gestures from a small set of elementary postures called atoms. We propose a modified version of the matching pursuit algorithm for decomposing an arbitrary input posture into a linear combination of primary and secondary atoms. These atoms are represented through their shape descriptor inferred from the 3D visual-hull of the human body posture. Using an atom-based description of postures increases tremendously the set of recognizable postures while reducing the required training data set. A gesture recognition system based on the atom decomposition and Hidden Markov Model (HMM) is also described. Instead of representing gestures as HMM transition of postures, we separate the description of gestures as two HMMs, each describing the transition of Primary/Secondary atoms; thus greatly reducing the size of state space of HMM. We illustrate the proposed approach for posture and gesture recognition method on a set of video streams captured by four synchronous cameras.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.