Publications
Rickel, Jeff
Intelligent Virtual Agents for Education and Training: Opportunities and Challenges Proceedings Article
In: Intelligent Virtual Agents: The 3rd International Workshop, Madrid, Spain, 2001.
@inproceedings{rickel_intelligent_2001,
title = {Intelligent Virtual Agents for Education and Training: Opportunities and Challenges},
author = {Jeff Rickel},
url = {http://ict.usc.edu/pubs/Intelligent%20Virtual%20Agents%20for%20Education%20and%20Training-%20Opportunities%20and%20Challenges.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Intelligent Virtual Agents: The 3rd International Workshop},
address = {Madrid, Spain},
abstract = {Interactive virtual worlds provide a powerful medium for experiential learning. Intelligent virtual agents can cohabit virtual worlds with people and facilitate such learning as guides, mentors, and teammates. This paper reviews the main pedagogical advantages of animated agents in virtual worlds, discusses two key research challenges, and outlines an ambitious new project addressing those challenges.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
New Factors in Room Equalization Using a Fuzzy Logic Approach Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, New York, NY, 2001.
@inproceedings{bharitkar_new_2001,
title = {New Factors in Room Equalization Using a Fuzzy Logic Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/New%20Factors%20in%20Room%20Equalization%20Using%20a%20Fuzzy%20Logic%20Approach.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {New York, NY},
abstract = {Room acoustical modes, particularly in small rooms, cause a significant variation in the room responses measured at different locations. Responses measured only a few cm apart can vary by up to 15-20 dB at certain frequencies. This makes it difficult to equalize an audio system for multiple simultaneous listeners. Previous methods have utilized multiple microphones and spatial averaging with equal weighting. In this paper we present a different multiple point equalization method. We first determine representative prototypical room responses derived from several room responses that share similar characteristics, using the fuzzy unsupervised learning method. These prototypical responses can then be combined to form a general point response. When we use the inverse of the general point response as an equalizing filter, our results show a significant improvement in equalization performance over the spatial averaging methods. This simultaneous equalization is achieved by suppressing the peaks in the room magnitude spectra. Applications of this method thus include equalization and multiple point sound control at home and in automobiles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
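The combine-and-invert step described in the abstract above lends itself to a short illustration. The following is a minimal sketch, not the authors' implementation: the weights stand in for the memberships a fuzzy clustering step would produce, and the regularization floor is a common practical safeguard rather than anything the paper specifies.

import numpy as np

def general_point_response(mag_responses, weights):
    """Weighted combination of per-location magnitude responses (linear scale)."""
    w = np.asarray(weights, dtype=float)
    return np.average(mag_responses, axis=0, weights=w / w.sum())

def inverse_eq_filter(general_mag, floor_db=-20.0):
    """Invert the combined response, flooring deep nulls so the
    equalizer does not apply extreme boosts at those frequencies."""
    floor = 10 ** (floor_db / 20.0) * general_mag.max()
    return 1.0 / np.maximum(general_mag, floor)

# Toy example: three listening positions, 512 frequency bins.
rng = np.random.default_rng(0)
responses = 1.0 + 0.5 * rng.random((3, 512))
eq = inverse_eq_filter(general_point_response(responses, [1.0, 1.0, 1.0]))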
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
@inproceedings{douglas_adaptive_2001,
title = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
author = {Jay Douglas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
@inproceedings{cohen_real-time_2001,
title = {Real-Time High-Dynamic Range Texture Mapping},
author = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Eurographics Rendering Workshop},
abstract = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8-bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
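The decomposition-and-reassembly idea in the abstract above can be suggested in a few lines of CPU-side code. This is an illustrative sketch only: the split point and scale factor are assumptions, not the paper's exact encoding, and the paper performs the reassembly in graphics hardware rather than in numpy.

import numpy as np

def split_hdr(hdr, scale=256.0):
    """Decompose HDR values into two 8-bit textures: `low` holds the bottom
    of the range, `high` holds the same data divided by `scale`."""
    low = np.clip(hdr, 0, 255).astype(np.uint8)
    high = np.clip(hdr / scale, 0, 255).astype(np.uint8)
    return low, high

def reassemble(low, high, exposure, scale=256.0, gamma=2.2):
    """Recombine the two textures at a given exposure and apply display gamma,
    using the high texture wherever the low one has saturated."""
    lo = low.astype(np.float32)
    hi = high.astype(np.float32) * scale
    hdr = np.where(low == 255, hi, lo)        # prefer unsaturated 8-bit data
    out = np.clip(hdr * exposure, 0, 255) / 255.0
    return (out ** (1.0 / gamma) * 255).astype(np.uint8)

texture = np.array([[10.0, 300.0, 5000.0]])   # radiance beyond 8-bit range
low, high = split_hdr(texture)
print(reassemble(low, high, exposure=0.05))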
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
@inproceedings{swartout_toward_2001,
title = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
author = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
url = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system in which the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of real-world scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
@inproceedings{gratch_modeling_2001,
title = {Modeling Emotions in the Mission Rehearsal Exercise},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
year = {2001},
date = {2001-05-01},
booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
pages = {457–466},
address = {Orlando, FL},
abstract = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer-mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem-solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception, and cooperation. By looking at the relationship among the CMC environment features, the social influence activated in this environment, the conflict elaboration, and the problem-solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Browsing Image Collections with Representations of Commonsense Activities Journal Article
In: Journal of the American Society for Information Science and Technology, vol. 52, no. 11, pp. 925–929, 2001.
@article{gordon_browsing_2001,
title = {Browsing Image Collections with Representations of Commonsense Activities},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Browsing%20Image%20Collections%20with%20Representations%20of%20Commonsense%20Activities.PDF},
year = {2001},
date = {2001-01-01},
journal = {Journal of the American Society for Information Science and Technology},
volume = {52},
number = {11},
pages = {925–929},
abstract = {To support browsing-based subject access to image collections, it is necessary to provide users with networks of subject terms that are organized in an intuitive, richly interconnected manner. A principled approach to this task is to organize the subject terms by their relationship to activity contexts that are commonly understood among users. This article describes a methodology for creating networks of subject terms by manually representing a large number of common-sense activities that are broadly related to image subject terms. The application of this methodology to the Library of Congress Thesaurus for Graphic Materials produced 768 representations that supported users of a prototype browsing-based retrieval system in searching large, indexed photograph collections.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
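As a rough illustration of the browsing idea in the abstract above, the sketch below connects subject terms through shared activity contexts. The activities and terms are invented placeholders, not entries from the paper's 768 representations or from the Thesaurus for Graphic Materials.

activities = {
    "harvesting crops": {"farmers", "wheat", "scythes", "fields"},
    "baking bread":     {"bakers", "wheat", "ovens", "flour"},
}

def related_terms(term):
    """Return subject terms that co-occur with `term` in some activity context."""
    related = set()
    for terms in activities.values():
        if term in terms:
            related |= terms - {term}
    return related

print(sorted(related_terms("wheat")))  # terms reachable from 'wheat' via activities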
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
@inproceedings{gratch_tears_2001,
title = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {278–285},
address = {Montreal, Canada},
abstract = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al.'s IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision-making skills in highly evocative situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
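The appraisal half of the model described above can be suggested in a few lines. This is an illustrative toy assuming a simple change-in-goal-probability rule and an invented intensity formula; it is not Émile or the IPD system.

from dataclasses import dataclass

@dataclass
class Goal:
    name: str
    importance: float        # 0..1

def appraise(goal: Goal, p_before: float, p_after: float):
    """Map a change in a goal's probability of success to an emotion label
    and an intensity (importance times the size of the change)."""
    delta = p_after - p_before
    intensity = goal.importance * abs(delta)
    if delta > 0:
        return ("hope" if p_after < 1.0 else "joy"), intensity
    if delta < 0:
        return ("fear" if p_after > 0.0 else "distress"), intensity
    return "neutral", 0.0

print(appraise(Goal("protect civilians", 0.9), p_before=0.8, p_after=0.3))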
Hawkins, Tim; Cohen, Jonathan; Debevec, Paul
A Photometric Approach to Digitizing Cultural Artifacts Proceedings Article
In: Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage, Glyfada, Greece, 2001.
@inproceedings{hawkins_photometric_2001,
title = {A Photometric Approach to Digitizing Cultural Artifacts},
author = {Tim Hawkins and Jonathan Cohen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Photometric%20Approach%20to%20Digitizing%20Cultural%20Artifacts.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
address = {Glyfada, Greece},
abstract = {In this paper we present a photometry-based approach to the digital documentation of cultural artifacts. Rather than representing an artifact as a geometric model with spatially varying reflectance properties, we instead propose directly representing the artifact in terms of its reflectance field - the manner in which it transforms light into images. The principal device employed in our technique is a computer-controlled lighting apparatus which quickly illuminates an artifact from an exhaustive set of incident illumination directions and a set of digital video cameras which record the artifact's appearance under these forms of illumination. From this database of recorded images, we compute linear combinations of the captured images to synthetically illuminate the object under arbitrary forms of complex incident illumination, correctly capturing the effects of specular reflection, subsurface scattering, self-shadowing, mutual illumination, and complex BRDFs often present in cultural artifacts. We also describe a computer application that allows users to realistically and interactively relight digitized artifacts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
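The core relighting operation in the abstract above, synthesizing novel illumination as linear combinations of the captured images, reduces to a weighted sum. A minimal numpy sketch, with random stand-ins for the captured data:

import numpy as np

def relight(basis_images, light_weights):
    """basis_images: (n_lights, H, W) captures, one per lighting direction.
    light_weights: (n_lights,) intensities of the novel illumination
    resolved onto those directions. Returns the relit image."""
    w = np.asarray(light_weights, dtype=np.float32)
    return np.tensordot(w, basis_images, axes=1)

# Toy example: 4 lighting directions, 2x2 images.
rng = np.random.default_rng(1)
captures = rng.random((4, 2, 2)).astype(np.float32)
novel = relight(captures, [0.5, 0.0, 1.0, 0.25])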
Lee, C. M.; Narayanan, Shrikanth; Pieraccini, R.
Recognition of Negative Emotions from the Speech Signal Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001), 2001.
@inproceedings{lee_recognition_2001,
title = {Recognition of Negative Emotions from the Speech Signal},
author = {C. M. Lee and Shrikanth Narayanan and R. Pieraccini},
url = {http://ict.usc.edu/pubs/Recognition%20of%20Negative%20Emotions%20from%20the%20Speech%20Signal.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001)},
abstract = {This paper reports on methods for automatic classification of spoken utterances based on the emotional state of the speaker. The data set used for the analysis comes from a corpus of human-machine dialogs recorded from a commercial application deployed by SpeechWorks. Linear discriminant classification with Gaussian class-conditional probability distributions and k-nearest neighbor methods are used to classify utterances into two basic emotion states, negative and non-negative. The features used by the classifiers are utterance-level statistics of the fundamental frequency and energy of the speech signal. To improve classification performance, two specific feature selection methods are used, namely, promising first selection and forward feature selection. Principal component analysis is used to reduce the dimensionality of the features while maximizing classification accuracy. Improvements obtained by feature selection and PCA are reported in this paper.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
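The classification pipeline the abstract describes maps naturally onto standard tooling. The sketch below uses synthetic stand-ins for the SpeechWorks features and labels; scikit-learn's PCA and LDA approximate, rather than reproduce, the paper's classifiers and feature selection.

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 10))       # e.g. mean/max/range of F0 and energy
y = rng.integers(0, 2, size=200)     # 0 = non-negative, 1 = negative

clf = make_pipeline(PCA(n_components=5), LinearDiscriminantAnalysis())
clf.fit(X[:150], y[:150])
print("held-out accuracy:", clf.score(X[150:], y[150:]))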
Hawkins, Tim; Cohen, Jonathan; Tchou, Chris; Debevec, Paul
Light Stage 2.0 Proceedings Article
In: SIGGRAPH Technical Sketches, pp. 217, 2001.
@inproceedings{hawkins_light_2001,
title = {Light Stage 2.0},
author = {Tim Hawkins and Jonathan Cohen and Chris Tchou and Paul Debevec},
url = {http://ict.usc.edu/pubs/Light%20Stage%202.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
pages = {217},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Ess-Dykema, Carol Van; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
author = {Mari Olsen and David Traum and Carol Van Ess-Dykema and Amy Weinberg},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization, with the best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
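As a toy illustration of the backoff strategy in the abstract above, the rule below consults overt aspect markers first and falls back on lexical telicity. The marker inventory and the tense mapping are simplified assumptions, not the paper's LCS-based system.

def suggest_tense(has_perfective_le, has_imperfective_zhe, telic):
    """Pick an English tense from Chinese aspect cues, backing off to telicity."""
    if has_perfective_le:
        return "past"
    if has_imperfective_zhe:
        return "present progressive"
    # No overt aspect marker (the majority of sentences): use telicity.
    return "past" if telic else "present"

print(suggest_tense(False, False, telic=True))   # -> "past"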
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Embedded High-Quality Multichannel Audio Coding Proceedings Article
In: Conference on Media Processors, Symposium on Electronic Imaging, San Jose, CA, 2001.
@inproceedings{yang_embedded_2001,
title = {Embedded High-Quality Multichannel Audio Coding},
author = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C. -C. Jay Kuo},
url = {http://ict.usc.edu/pubs/Embedded%20High-Quality%20Multichannel%20Audio%20Coding.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Conference on Media Processors, Symposium on Electronic Imaging},
address = {San Jose, CA},
abstract = {An embedded high-quality multichannel audio coding algorithm is proposed in this research. The Karhunen-Loeve Transform (KLT) is applied to multichannel audio signals in the pre-processing stage to remove inter-channel redundancy. Then, after processing by several audio coding blocks, transform coefficients are quantized in layers and the bit stream is ordered according to their importance. The multichannel audio bit stream generated by the proposed algorithm has a fully progressive property, which is highly desirable for audio multicast applications in heterogeneous networks. Experimental results show that, compared with the MPEG Advanced Audio Coding (AAC) algorithm, the proposed algorithm achieves a better performance with both the objective MNR (Mask-to-Noise-Ratio) measurement and the subjective listening test at several different bit rates.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
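The pre-processing stage the abstract describes, a KLT across channels, can be sketched directly. This shows only the decorrelating transform on placeholder signals, not the layered quantization or progressive bitstream ordering.

import numpy as np

def klt_channels(x):
    """x: (channels, samples). Returns decorrelated signals and the
    orthonormal basis needed to invert the transform at the decoder."""
    x_centered = x - x.mean(axis=1, keepdims=True)
    cov = np.cov(x_centered)                  # inter-channel covariance
    _, basis = np.linalg.eigh(cov)            # eigenvectors as columns
    return basis.T @ x_centered, basis

rng = np.random.default_rng(2)
common = rng.normal(size=48000)               # shared content across channels
audio = np.stack([common + 0.1 * rng.normal(size=48000) for _ in range(5)])
decorrelated, basis = klt_channels(audio)
print(np.round(np.cov(decorrelated), 2))      # near-diagonal covariance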
Waese, Jamie; Debevec, Paul
A Real Time High Dynamic Range Light Probe Proceedings Article
In: SIGGRAPH Technical Sketches, 2001.
@inproceedings{waese_real_2001,
title = {A Real Time High Dynamic Range Light Probe},
author = {Jamie Waese and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Real%20Time%20High%20Dynamic%20Range%20Light%20Probe.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
A Cluster Centroid Method for Room Response Equalization at Multiple Locations Proceedings Article
In: IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics, pp. 55–58, New Platz, NY, 2001, ISBN: 0-7803-7126-7.
@inproceedings{bharitkar_cluster_2001,
title = {A Cluster Centroid Method for Room Response Equalization at Multiple Locations},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/A%20CLUSTER%20CENTROID%20METHOD%20FOR%20ROOM%20RESPONSE%20EQUALIZATION%20AT%20MULTIPLE%20LOCATIONS.pdf},
isbn = {0-7803-7126-7},
year = {2001},
date = {2001-01-01},
booktitle = {IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics},
pages = {55–58},
address = {New Platz, NY},
abstract = {In this paper we address the problem of simultaneous room response equalization for multiple listeners. Traditional approaches to this problem have used a single microphone at the listening position to measure impulse responses from a loudspeaker and then use an inverse filter to correct the frequency response. The problem with that approach is that it only works well for that one point and in most cases is not practical even for one listener with a typical ear spacing of 18 cm. It does not work at all for other listeners in the room, or if the listener changes positions even slightly. We propose a new approach that is based on the Fuzzy c-means clustering technique. We use this method to design equalization filters and demonstrate that we can achieve better equalization performance for several locations in the room simultaneously as compared to single point or simple averaging methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
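A compact sketch of the fuzzy c-means machinery the paper builds on: one iteration that updates membership degrees and cluster centroids, with random placeholders for the measured responses. The update follows the standard FCM equations, not necessarily the paper's exact formulation.

import numpy as np

def fcm_step(X, centroids, m=2.0):
    """One fuzzy c-means iteration. X: (n, d) responses, centroids: (c, d)."""
    d = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2) + 1e-12
    # Membership of response i in cluster k: 1 / sum_j (d_ik / d_ij)^(2/(m-1))
    u = 1.0 / np.sum((d[:, :, None] / d[:, None, :]) ** (2 / (m - 1)), axis=2)
    um = u ** m
    new_centroids = (um.T @ X) / um.sum(axis=0)[:, None]
    return u, new_centroids

rng = np.random.default_rng(3)
X = rng.random((12, 256))                 # 12 measured magnitude responses
centroids = X[rng.choice(12, size=2, replace=False)]
for _ in range(20):
    u, centroids = fcm_step(X, centroids) # centroids -> prototype responses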
Srinivasamurthy, Naveen; Narayanan, Shrikanth; Ortega, Antonio
Use of Model Transformations for Distributed Speech Recognition Proceedings Article
In: 4th ISCA Tutorial and Research Workshop on Speech Synthesis, pp. 113–116, Sophia Antipolis, France, 2001.
@inproceedings{srinivasamurthy_use_2001,
title = {Use of Model Transformations for Distributed Speech Recognition},
author = {Naveen Srinivasamurthy and Shrikanth Narayanan and Antonio Ortega},
url = {http://ict.usc.edu/pubs/Use%20of%20Model%20Transformations%20for%20Distributed%20Speech%20Recognition.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {4th ISCA Tutorial and Research Workshop on Speech Synthesis},
pages = {113–116},
address = {Sophia Antipolis, France},
abstract = {Due to bandwidth limitations, the speech recognizer in distributed speech recognition (DSR) applications has to use encoded speech - either traditional speech encoding or speech encoding optimized for recognition. The penalty incurred in reducing the bitrate is degradation in speech recognition performance. The diversity of the applications using DSR implies that a variety of speech encoders can be used to compress speech. By treating the encoder variability as a mismatch we propose using model transformation to reduce the speech recognition performance degradation. The advantage of using model transformation is that only a single model set needs to be trained at the server, which can be adapted on the fly to the input speech data. We were able to reduce the word error rate by 61.9%, 63.3% and 56.3% for MELP, GSM and MFCC-encoded data, respectively, by using MAP adaptation, which shows the generality of our proposed scheme.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
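The abstract credits MAP adaptation with recovering much of the accuracy lost to coder mismatch. Below is a minimal sketch of MAP mean adaptation for a single Gaussian, assuming hard frame alignments and an illustrative tau; real systems adapt full HMM mixture models.

import numpy as np

def map_adapt_mean(prior_mean, frames, tau=16.0):
    """frames: (n, d) feature vectors aligned to this Gaussian.
    tau controls how strongly the prior mean is trusted vs. the data."""
    n = len(frames)
    if n == 0:
        return prior_mean
    return (tau * prior_mean + frames.sum(axis=0)) / (tau + n)

prior = np.zeros(13)                       # e.g. a 13-dim MFCC mean
adapt_frames = np.ones((40, 13)) * 0.5     # features from decoded speech
print(map_adapt_mean(prior, adapt_frames)[:3])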
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
@inproceedings{damiano_anticipatory_2001,
title = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
author = {Rossana Damiano and David Traum},
url = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Proceedings Article
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
@inproceedings{marsella_modeling_2001,
title = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
address = {Edinburgh, Scotland},
abstract = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}