Publications
Search
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
@inproceedings{traum_virtual_2005,
  title     = {Virtual Humans for non-team interaction training},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
  address   = {Utrecht, Netherlands},
  abstract  = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
@article{gratch_evaluating_2005,
  title     = {Evaluating a computational model of emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
  year      = {2005},
  date      = {2005-07-01},
  journal   = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
  volume    = {11},
  number    = {1},
  pages     = {23--43},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Riedl, Mark O.; Lane, H. Chad; Hill, Randall W.; Swartout, William
Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture Proceedings Article
In: AI and Education 2005 Workshop on Narrative Learning Environments, Amsterdam, The Netherlands, 2005.
@inproceedings{riedl_automated_2005,
  title     = {Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture},
  author    = {Mark O. Riedl and H. Chad Lane and Randall W. Hill and William Swartout},
  url       = {http://ict.usc.edu/pubs/Automated%20Story%20Direction%20and%20Intelligent%20Tutoring-%20Towards%20a%20Unifying%20Architecture.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {AI and Education 2005 Workshop on Narrative Learning Environments},
  address   = {Amsterdam, The Netherlands},
  abstract  = {Recently, interactive storytelling systems --- systems that allow a user to make decisions that can potentially impact the direction of a narrative --- have been applied to training and education. Interactive storytelling systems often rely on an automated story director to manage the user's experience. The focus of an automated director is the emergence of a narrative-like experience for the user. In contrast, intelligent tutors traditionally address the acquisition or strengthening of a learner's knowledge. Our goal is to build training simulations that cultivate compelling storylines while simultaneously maintaining a pedagogical presence by incorporating both automated story direction and intelligent tutoring into an immersive environment. But what is the relationship between an automated director and an intelligent tutor? In this paper, we discuss the similarities and differences of automated story directors and intelligent tutors and, based on our analysis, recommend an architecture for building narrative-based training simulations that utilize both effectively and without conflict.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Kim, Gerard J.; Yeh, Shih-Ching; Thiebaux, Marcus; Hwang, Jayne; Buckwalter, John Galen
Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
@inproceedings{rizzo_development_2005,
  title     = {Development of a Benchmarking Scenario for Testing {3D} User Interface Devices and Interaction Methods},
  author    = {Albert Rizzo and Gerard J. Kim and Shih-Ching Yeh and Marcus Thiebaux and Jayne Hwang and John Galen Buckwalter},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20Benchmarking%20Scenario%20for%20Testing%203D%20User%20Interface%20Devices%20and%20Interaction%20Methods.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
  address   = {Las Vegas, NV},
  abstract  = {To address a part of the challenge of testing and comparing various 3D user interface devices and methods, we are currently developing and testing a VR 3D User Interface benchmarking scenario. The approach outlined in this paper focuses on the capture of human interaction performance on object selection and manipulation tasks using standardized and scalable block configurations that allow for measurement of speed and efficiency with any interaction device or method. The block configurations that we are using as benchmarking stimuli are accompanied by a pure mental rotation visuospatial assessment test. This feature will allow researchers to test users' existing spatial abilities and statistically parcel out the variability due to innate ability, from the actual hands-on performance metrics. This statistical approach could lead to a more pure analysis of the ergonomic features of interaction devices and methods separate from existing user abilities. An initial test was conducted at two sites using this benchmarking system to make comparisons between 3D/gesture-based and 2D/mouse-based interactions for 3D selection and manipulation. Our preliminary results demonstrated, as expected, that the 3D/gesture based method in general outperformed the 2D/mouse interface. As well there were statistically significant performance differences between different user groups when categorized by their sex, visuospatial ability and educational background.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Commonsense Psychology and the Functional Requirements of Cognitive Models Proceedings Article
In: American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence, AAAI Press, Pittsburgh, PA, 2005.
@inproceedings{gordon_commonsense_2005,
  title     = {Commonsense Psychology and the Functional Requirements of Cognitive Models},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Commonsense%20Psychology%20and%20the%20Functional%20Requirements%20of%20Cognitive%20Models.pdf},
  year      = {2005},
  date      = {2005-07-01},
  booktitle = {American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence},
  publisher = {AAAI Press},
  address   = {Pittsburgh, PA},
  abstract  = {In this paper we argue that previous models of cognitive abilities (e.g. memory, analogy) have been constructed to satisfy functional requirements of implicit commonsense psychological theories held by researchers and nonresearchers alike. Rather than working to avoid the influence of commonsense psychology in cognitive modeling research, we propose to capitalize on progress in developing formal theories of commonsense psychology to explicitly define the functional requirements of cognitive models. We present a taxonomy of 16 classes of cognitive models that correspond to the representational areas that have been addressed in large-scale inferential theories of commonsense psychology. We consider the functional requirements that can be derived from inferential theories for one of these classes, the processes involved in human memory. We argue that the breadth coverage of commonsense theories can be used to better evaluate the explanatory scope of cognitive models, as well as facilitate the investigation of larger-scale cognitive systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
@inproceedings{ettaile_transonics_2005,
  title     = {{Transonics}: A Practical Speech-to-Speech Translator for {English-Farsi} Medical Dialogues},
  author    = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
  pages     = {89--92},
  address   = {Ann Arbor, MI},
  abstract  = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctor-patient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liao, Wei-Kai; Cohen, Isaac
Classifying Facial Gestures in Presence of Head Motion Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
@inproceedings{liao_classifying_2005,
  title     = {Classifying Facial Gestures in Presence of Head Motion},
  author    = {Wei-Kai Liao and Isaac Cohen},
  url       = {http://ict.usc.edu/pubs/Classifying%20Facial%20Gestures%20in%20Presence%20of%20Head%20Motion.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
  address   = {San Diego, CA},
  abstract  = {This paper addresses the problem of automatic facial gestures recognition in an interactive environment. Automatic facial gestures recognition is a difficult problem in computer vision, and most of the work has focused on inferring facial gestures in the context of a static head. In the paper we address the challenging problem of recognizing the facial expressions of a moving head. We present a systematic framework to analyze and classify the facial gestures with the head movement. Our system includes a 3D head pose estimation method to recover the global head motion. After estimating the head pose, the human face is modeled by a collection of face's regions. These regions represent the face model used for locating and extracting temporal facial features. We propose using a locally affine motion model to represent extracted motion fields. The classification consists of a graphical model for robustly representing the dependencies of the selected facial regions and the support vector machine. Our experiments show that this approach could classify human expressions in interactive environments accurately.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chu, Chi-Wei; Cohen, Isaac
Posture and Gesture Recognition using 3D Body Shapes Decomposition Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
@inproceedings{chu_posture_2005,
  title     = {Posture and Gesture Recognition using {3D} Body Shapes Decomposition},
  author    = {Chi-Wei Chu and Isaac Cohen},
  url       = {http://ict.usc.edu/pubs/Posture%20and%20Gesture%20Recognition%20using%203D%20Body%20Shapes%20Decomposition.pdf},
  year      = {2005},
  date      = {2005-06-01},
  booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
  address   = {San Diego, CA},
  abstract  = {We present a method for describing arbitrary human posture as a combination of basic postures. This decomposition allows for recognition of a larger number of postures and gestures from a small set of elementary postures called atoms. We propose a modified version of the matching pursuit algorithm for decomposing an arbitrary input posture into a linear combination of primary and secondary atoms. These atoms are represented through their shape descriptor inferred from the 3D visual-hull of the human body posture. Using an atom-based description of postures increases tremendously the set of recognizable postures while reducing the required training data set. A gesture recognition system based on the atom decomposition and Hidden Markov Model (HMM) is also described. Instead of representing gestures as HMM transition of postures, we separate the description of gestures as two HMMs, each describing the transition of Primary/Secondary atoms; thus greatly reducing the size of state space of HMM. We illustrate the proposed approach for posture and gesture recognition method on a set of video streams captured by four synchronous cameras.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Velson, Martin; Hill, Randall W.
Modeling Dynamic Perceptual Attention in Complex Virtual Environments Proceedings Article
In: Conference on Behavior Representation in Modeling and Simulation (BRIMS), Universal City, CA, 2005.
@inproceedings{kim_modeling_2005,
  title     = {Modeling Dynamic Perceptual Attention in Complex Virtual Environments},
  author    = {Youngjun Kim and Martin Velson and Randall W. Hill},
  url       = {http://ict.usc.edu/pubs/Modeling%20Dynamic%20Perceptual%20Attention%20in%20Complex%20Virtual%20Environments.pdf},
  year      = {2005},
  date      = {2005-05-01},
  booktitle = {Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
  address   = {Universal City, CA},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to entities and areas in a virtual environment in a manner that appears believable and serves a functional purpose. In this paper, we describe a perceptual attention model that integrates perceptual attention that mediates top-down and bottom-up attention processes of virtual humans within complex virtual environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hobbs, Jerry R.; Gordon, Andrew S.
Encoding Knowledge of Commonsense Psychology Proceedings Article
In: 7th International Symposium on Logical Formalizations of Commonsense Reasoning, Corfu, Greece, 2005.
@inproceedings{hobbs_encoding_2005,
  title     = {Encoding Knowledge of Commonsense Psychology},
  author    = {Jerry R. Hobbs and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Encoding%20Knowledge%20of%20Commonsense%20Psychology.pdf},
  year      = {2005},
  date      = {2005-05-01},
  booktitle = {7th International Symposium on Logical Formalizations of Commonsense Reasoning},
  address   = {Corfu, Greece},
  abstract  = {Introduction: In previous papers (Gordon and Hobbs, 2003, 2004) we have described a methodology for determining what knowledge should be included in the knowledge base for an intelligent agent, capable of constructing and executing plans to achieve its goals. An intelligent agent is at least a planning mechanism, so Gordon (2004) asked what concepts are necessary for the common strategies that people use in achieving their goals. He investigated ten different domains, including politics, personal relationships, artistic performance, and warfare, and collected 372 strategies. He authored representations of these strategies in order to identify a controlled vocabulary involving of concepts. These concepts were categorized into 48 different representational areas, such as sets, space, and time. Thirty of the representational areas, involving 635 concepts, were concerned with commonsense psychology; among these are memory, knowledge management, planning, and so on. This result by itself demonstrates the very great importance of commonsense psychology in the construction of intelligent agents. Gordon et al. (2003) then, to define further each of the representational areas, augmented the list of concepts by investigating the English language expressions for concepts in each area. The result was a list of 528 concepts, a set that identifies the target coverage of a formal theory of commonsense psychology. The authors began the development of formal theories that would encompass this list of concepts. In our earlier work (Gordon and Hobbs, 2003), we described the first theory we constructed, memory, as an illustration of the method. We have now completed 14 of the 30 theories, and this paper provides an overview of this work as we close in on the halfway mark.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
A Computational Model of Dynamic Perceptual Attention for Virtual Humans Proceedings Article
In: Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation, Universal City, CA, 2005.
@inproceedings{kim_computational_2005,
  title     = {A Computational Model of Dynamic Perceptual Attention for Virtual Humans},
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Dynamic%20Perceptual%20Attention%20for%20Virtual%20Humans.pdf},
  year      = {2005},
  date      = {2005-05-01},
  booktitle = {Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Universal City, CA},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to objects and locations in a virtual environment in a manner that looks believable and serves a functional purpose. We have developed a computational model of perceptual attention that mediates top-down and bottom-up attention processes of virtual humans in virtual environments. In this paper, we propose a perceptual attention model that will integrate perceptual attention toward objects and locations in the environment with the need to look at other parties in a social context.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gomboc, Dave; Solomon, Steve; Core, Mark; Lane, H. Chad; Lent, Michael
Design Recommendations to Support Automated Explanation and Tutoring Proceedings Article
In: Conference on Behavior Representation in Modeling and Simulation (BRIMS), Universal City, CA, 2005.
@inproceedings{gomboc_design_2005,
  title     = {Design Recommendations to Support Automated Explanation and Tutoring},
  author    = {Dave Gomboc and Steve Solomon and Mark Core and H. Chad Lane and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Design%20Recommendations%20to%20Support%20Automated%20Explanation%20and%20Tutoring.pdf},
  year      = {2005},
  date      = {2005-05-01},
  booktitle = {Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
  address   = {Universal City, CA},
  abstract  = {The after-action review is an essential component of military training exercises. The use of constructive simulations for training poses a challenge when conducting such reviews, because behavior models are typically designed to simulate satisfactorily, without explicit concern for the interrogation of synthetic entities afterward. Ideally, users could obtain knowledge about not only the choices made by a simulator's behavior models, but also the rationale for those choices. This requires a rich representation of behavioral knowledge within the software system. We have integrated our explainable AI system with behavior models and log information from two simulation systems. Selecting examples from these simulators, we identify areas for improvement to facilitate the automation of explanation and tutoring.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Donghui; Hovy, Eduard
MRE: A Study on Evolutionary Language Understanding Proceedings Article
In: Second International Workshop on Natural Language Understanding and Cognitive Science (NLUCS), Miami, Florida, 2005.
@inproceedings{feng_mre_2005,
  title     = {{MRE}: A Study on Evolutionary Language Understanding},
  author    = {Donghui Feng and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/MRE-%20A%20Study%20on%20Evolutionary%20Language%20Understanding.pdf},
  year      = {2005},
  date      = {2005-05-01},
  booktitle = {Second International Workshop on Natural Language Understanding and Cognitive Science (NLUCS)},
  address   = {Miami, Florida},
  abstract  = {The lack of well-annotated data is always one of the biggest problems for most training-based dialogue systems. Without enough training data, it's almost impossible for a trainable system to work. In this paper, we explore the evolutionary language understanding approach to build a natural language understanding machine in a virtual human training project. We build the initial training data with a finite state machine. The language understanding system is trained based on the automated data first and is improved as more and more real data come in, which is proved by the experimental results.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nijholt, Anton; Traum, David
The Virtuality Continuum Revisited Proceedings Article
In: CHI 2005 Workshop on the Virtuality Continuum Revisited, Portland, OR, 2005.
@inproceedings{nijholt_virtuality_2005,
  title     = {The Virtuality Continuum Revisited},
  author    = {Anton Nijholt and David Traum},
  url       = {http://ict.usc.edu/pubs/The%20Virtuality%20Continuum%20Revisited.pdf},
  year      = {2005},
  date      = {2005-04-01},
  booktitle = {CHI 2005 Workshop on the Virtuality Continuum Revisited},
  address   = {Portland, OR},
  abstract  = {We survey the themes and the aims of a workshop devoted to the state-of-the-art virtuality continuum. In this continuum, ranging from fully virtual to real physical environments, allowing for mixed, augmented and desktop virtual reality, several perspectives can be taken. Originally, the emphasis was on display technologies. Here we take the perspective of the inhabited environment, that is, environments positioned somewhere on this continuum that are inhabited by virtual (embodied) agents, that interact with each other and with their human partners. Hence, we look at it from the multi-party interaction perspective. In this workshop we will investigate the current state of the art, its shortcomings and a future research agenda.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Peterson, Michael J.; Kyriakakis, Chris
Choosing Candidate Locations for Source Localization Proceedings Article
In: International Workshop on Hands Free Communication and Microphone Arrays, Rutgers, NY, 2005.
@inproceedings{peterson_choosing_2005,
  title     = {Choosing Candidate Locations for Source Localization},
  author    = {Michael J. Peterson and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/CHOOSING%20CANDIDATE%20LOCATIONS%20FOR%20SOURCE%20LOCALIZATION.pdf},
  year      = {2005},
  date      = {2005-03-01},
  booktitle = {International Workshop on Hands Free Communication and Microphone Arrays},
  address   = {Rutgers, NY},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swanson, Reid; Gordon, Andrew S.
Automated Commonsense Reasoning About Human Memory Proceedings Article
In: AAAI Spring Symposium on Metacognitive Computing, Stanford, CA, 2005.
@inproceedings{swanson_automated_2005,
  title     = {Automated Commonsense Reasoning About Human Memory},
  author    = {Reid Swanson and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Automated%20Commonsense%20Reasoning%20About%20Human%20Memory.pdf},
  year      = {2005},
  date      = {2005-03-01},
  booktitle = {AAAI Spring Symposium on Metacognitive Computing},
  address   = {Stanford, CA},
  abstract  = {Metacognitive reasoning in computational systems will be enabled by the development of formal theories that have broad coverage over mental states and processes as well as inferential competency. In this paper we evaluate the inferential competency of an existing formal theory of commonsense human memory by attempting to use it to validate the appropriateness of a commonsense memory strategy. We formulate a particular memory strategy (to create an associated obstacle) as a theorem in first-order predicate calculus. We then attempt to validate this strategy by showing that it is entailed by the axioms of the theory we evaluated. These axioms were encoded into the syntax of an automated reasoning system, which was used to automatically generate inferences and search for formal proofs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Busso, Carlos; Hernanz, Sergi; Chu, Chi-Wei; Kwon, Soon-il; Lee, Sung; Georgiou, Panayiotis G.; Cohen, Isaac; Narayanan, Shrikanth
Smart Room: Participant and Speaker Localization and Identification Proceedings Article
In: Proceedings of the IEEE Conference on Acoustics, Speech and Signal Processing, Philadelphia, PA, 2005.
@inproceedings{busso_smart_2005,
  title     = {Smart Room: Participant and Speaker Localization and Identification},
  author    = {Carlos Busso and Sergi Hernanz and Chi-Wei Chu and Soon-il Kwon and Sung Lee and Panayiotis G. Georgiou and Isaac Cohen and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/SMART%20ROOM-%20PARTICIPANT%20AND%20SPEAKER%20LOCALIZATION%20AND%20IDENTIFICATION.pdf},
  year      = {2005},
  date      = {2005-03-01},
  booktitle = {Proceedings of the IEEE Conference on Acoustics, Speech and Signal Processing},
  address   = {Philadelphia, PA},
  abstract  = {Our long-term objective is to create Smart Room Technologies that are aware of the users' presence and their behavior and can become an active, but not an intrusive, part of the interaction. In this work, we present a multimodal approach for estimating and tracking the location and identity of the participants including the active speaker. Our smart room design contains three user-monitoring systems: four CCD cameras, an omnidirectional camera and a 16 channel microphone array. The various sensory modalities are processed both individually and jointly and it is shown that the multimodal approach results in significantly improved performance in spatial localization, identification and speech activity detection of the participants.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pair, Jarrell; Treskunov, Anton; Piepol, Diane
Leveraging Hollywood Set Design Techniques to Enhance Ad Hoc Immersive Display Systems Proceedings Article
In: IEEE VR Emerging Displays Workshop, Bonn, Germany, 2005.
@inproceedings{pair_leveraging_2005,
  title     = {Leveraging {Hollywood} Set Design Techniques to Enhance Ad Hoc Immersive Display Systems},
  author    = {Jarrell Pair and Anton Treskunov and Diane Piepol},
  url       = {http://ict.usc.edu/pubs/Leveraging%20Hollywood%20Set%20Design%20Techniques%20to%20Enhance%20Ad%20Hoc%20Immersive%20Display%20Systems.pdf},
  year      = {2005},
  date      = {2005-03-01},
  booktitle = {IEEE VR Emerging Displays Workshop},
  address   = {Bonn, Germany},
  abstract  = {Over the past four years, the FlatWorld project [1] at the University of Southern California Institute for Creative Technologies has exploited ad hoc immersive display techniques to prototype virtual reality education and training applications. While our approach is related to traditional immersive projection systems such as the CAVE [2], our work draws extensively upon techniques widely used in Hollywood sets and theme parks. Our first display system, initially prototyped in 2001, enables wide area virtual environments in which participants can maneuver through simulated rooms, buildings, or streets. In 2004, we expanded our work by experimenting with transparent projection screens. To date, we have used this display technique for presenting life size interactive characters with a pseudo-holographic appearance.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Dagen; Narayanan, Shrikanth
Speech Rate Estimation Via Temporal Correlation and Selected Sub-band Correlation Proceedings Article
In: Proceedings of the International Conference on Acoustics, Speech, and Signal Processing (ICASSP), pp. 413–416, Philadelphia, PA, 2005.
@inproceedings{wang_speech_2005,
  title     = {Speech Rate Estimation Via Temporal Correlation and Selected Sub-band Correlation},
  author    = {Dagen Wang and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/SPEECH%20RATE%20ESTIMATION%20VIA%20TEMPORAL%20CORRELATION%20AND%20SELECTED%20SUB-BAND%20CORRELATION.pdf},
  year      = {2005},
  date      = {2005-03-01},
  booktitle = {Proceedings of the International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  pages     = {413--416},
  address   = {Philadelphia, PA},
  abstract  = {In this paper, we propose a novel method for speech rate estimation without requiring automatic speech recognition. It extends the methods of spectral subband correlation by including temporal correlation and the use of selecting prominent spectral subbands for correlation. Further more, to address some of the practical issues in previously published methods, we introduce some novel components into the algorithm such as the use of pitch confidence, magnifying window, relative peak measure and relative threshold. By selecting the parameters and thresholds from realistic development sets, this method achieves a 0.972 correlation coefficient on syllable number estimation and a 0.706 correlation on speech rate estimation. This result is about 6.9% improvement than current best single estimator and 3.5% improvement than current multi-estimator evaluated on the same switchboard database.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Peterson, Michael J.; Kyriakakis, Chris
Hybrid Algorithm for Robust, Real-time Source Localization in the Reverberant Environments Proceedings Article
In: International Conference on Acoustics, Speech and Signal Processing, Philadelphia, PA, 2005.
@inproceedings{peterson_hybrid_2005,
  title     = {Hybrid Algorithm for Robust, Real-time Source Localization in Reverberant Environments},
  author    = {Michael J. Peterson and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/HYBRID%20ALGORITHM%20FOR%20ROBUST,%20REAL-TIME%20SOURCE%20LOCALIZATION%20IN%20REVERBERANT%20ENVIRONMENTS.pdf},
  year      = {2005},
  date      = {2005-03-01},
  booktitle = {International Conference on Acoustics, Speech and Signal Processing},
  address   = {Philadelphia, PA},
  abstract  = {The location of an acoustical source can be found robustly using the Steered Response Pattern - Phase Transform (SRP-PHAT) algorithm. However SRP-PHAT can be computationally expensive, requiring a search of a large number of candidate locations. The required spacing between these locations is dependent on sampling rate, microphone array geometry, and source location. In this work, a novel method will be presented that calculates a smaller number of test points using an efficient closed-form localization algorithm. This method significantly reduces the number of calculations, while still remaining robust in acoustical environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.