Publications
Hawkins, Tim; Cohen, Jonathan; Debevec, Paul
A Photometric Approach to Digitizing Cultural Artifacts Proceedings Article
In: Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage, Glyfada, Greece, 2001.
@inproceedings{hawkins_photometric_2001,
title = {A Photometric Approach to Digitizing Cultural Artifacts},
author = {Tim Hawkins and Jonathan Cohen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Photometric%20Approach%20to%20Digitizing%20Cultural%20Artifacts.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
address = {Glyfada, Greece},
abstract = {In this paper we present a photometry-based approach to the digital documentation of cultural artifacts. Rather than representing an artifact as a geometric model with spatially varying reflectance properties, we instead propose directly representing the artifact in terms of its reflectance field - the manner in which it transforms light into images. The principal device employed in our technique is a computer-controlled lighting apparatus which quickly illuminates an artifact from an exhaustive set of incident illumination directions and a set of digital video cameras which record the artifact's appearance under these forms of illumination. From this database of recorded images, we compute linear combinations of the captured images to synthetically illuminate the object under arbitrary forms of complex incident illumination, correctly capturing the effects of specular reflection, subsurface scattering, self-shadowing, mutual illumination, and complex BRDF's often present in cultural artifacts. We also describe a computer application that allows users to realistically and interactively relight digitized artifacts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
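The relighting technique summarized in the abstract above reduces, at render time, to a weighted sum of the captured images, one weight per incident illumination direction. The sketch below is a minimal Python illustration of that linear combination with stand-in arrays; the shapes, names, and random data are assumptions, not the authors' implementation.

```python
# Sketch of image-based relighting from a light-stage dataset (hypothetical
# shapes; not the authors' code). Each captured image shows the artifact lit
# from one incident direction; a novel environment is reproduced by a
# weighted sum of those images.
import numpy as np

def relight(basis_images, weights):
    """basis_images: (N, H, W, 3) array, one image per incident direction.
    weights: (N,) per-direction intensities sampled from the target
    illumination environment. Returns the relit (H, W, 3) image."""
    weights = np.asarray(weights, dtype=np.float64)
    return np.tensordot(weights, basis_images, axes=(0, 0))

# Example with random stand-in data: 64 lighting directions, 4x4 images.
basis = np.random.rand(64, 4, 4, 3)
env_weights = np.random.rand(64) / 64.0   # e.g. environment-map samples
relit = relight(basis, env_weights)
print(relit.shape)  # (4, 4, 3)
```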
Lee, C. M.; Narayanan, Shrikanth; Pieraccini, R.
Recognition of Negative Emotions from the Speech Signal Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001), 2001.
@inproceedings{lee_recognition_2001,
title = {Recognition of Negative Emotions from the Speech Signal},
author = {C. M. Lee and Shrikanth Narayanan and R. Pieraccini},
url = {http://ict.usc.edu/pubs/Recognition%20of%20Negative%20Emotions%20from%20the%20Speech%20Signal.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001)},
abstract = {This paper reports on methods for automatic classification of spoken utterances based on the emotional state of the speaker. The data set used for the analysis comes from a corpus of human-machine dialogs recorded from a commercial application deployed by SpeechWorks. Linear discriminant classification with Gaussian class-conditional probability distributions and k-nearest neighbor methods are used to classify utterances into two basic emotion states, negative and non-negative. The features used by the classifiers are utterance-level statistics of the fundamental frequency and energy of the speech signal. To improve classification performance, two specific feature selection methods are used; namely, promising first selection and forward feature selection. Principal component analysis is used to reduce the dimensionality of the features while maximizing classification accuracy. Improvements obtained by feature selection and PCA are reported in this paper.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
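As a rough companion to the abstract above, the following Python sketch strings together the same kind of pipeline: utterance-level statistics of F0 and energy, PCA for dimensionality reduction, and a k-nearest-neighbor classifier. All data, feature choices, and parameters are invented stand-ins; the paper's corpus and exact feature set are not reproduced here.

```python
# Minimal emotion-classification pipeline sketch (assumed features and toy data).
import numpy as np

def utterance_features(f0, energy):
    """Summary statistics of the pitch and energy contours of one utterance."""
    stats = lambda x: [np.mean(x), np.std(x), np.min(x), np.max(x)]
    return np.array(stats(f0) + stats(energy))

def pca_fit(X, k):
    mu = X.mean(axis=0)
    _, _, Vt = np.linalg.svd(X - mu, full_matrices=False)
    return mu, Vt[:k]                      # mean and top-k principal axes

def knn_predict(X_train, y_train, x, k=3):
    d = np.linalg.norm(X_train - x, axis=1)
    votes = y_train[np.argsort(d)[:k]]
    return np.bincount(votes).argmax()

# Toy data: 20 utterances, label 1 = negative emotion, 0 = non-negative.
rng = np.random.default_rng(0)
X = np.array([utterance_features(rng.normal(200, 30, 100),
                                 rng.normal(0.1, 0.02, 100)) for _ in range(20)])
y = rng.integers(0, 2, 20)
mu, W = pca_fit(X, k=4)
Z = (X - mu) @ W.T
print(knn_predict(Z[:-1], y[:-1], Z[-1]))
```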
Hawkins, Tim; Cohen, Jonathan; Tchou, Chris; Debevec, Paul
Light Stage 2.0 Proceedings Article
In: SIGGRAPH Technical Sketches, pp. 217, 2001.
@inproceedings{hawkins_light_2001,
title = {Light Stage 2.0},
author = {Tim Hawkins and Jonathan Cohen and Chris Tchou and Paul Debevec},
url = {http://ict.usc.edu/pubs/Light%20Stage%202.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
pages = {217},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Ess-Dykema, Carol Van; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
author = {Mari Olsen and David Traum and Carol Van Ess-Dykema and Amy Weinberg},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization, with a best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
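The heuristic the paper evaluates can be caricatured in a few lines: trust overt aspect markers when they are present, and fall back on the (a)telicity of the verb phrase otherwise. The function below is only an illustrative toy with invented feature names, not the paper's LCS-based system.

```python
# Toy tense-selection heuristic (invented markers and features; illustration only).
def choose_tense(perfective_marker, imperfective_marker, telic):
    if perfective_marker:        # overt perfective marker -> completed event
        return "past"
    if imperfective_marker:      # overt imperfective marker -> ongoing event
        return "present"
    # No overt marker (the majority of sentences): fall back on telicity.
    return "past" if telic else "present"

print(choose_tense(False, False, telic=True))   # -> "past"
```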
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Embedded High-Quality Multichannel Audio Coding Proceedings Article
In: Conference on Media Processors, Symposium on Electronic Imaging, San Jose, CA, 2001.
@inproceedings{yang_embedded_2001,
title = {Embedded High-Quality Multichannel Audio Coding},
author = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C. -C. Jay Kuo},
url = {http://ict.usc.edu/pubs/Embedded%20High-Quality%20Multichannel%20Audio%20Coding.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Conference on Media Processors, Symposium on Electronic Imaging},
address = {San Jose, CA},
abstract = {An embedded high-quality multichannel audio coding algorithm is proposed in this research. The Karhunen-Loeve Transform (KLT) is applied to multichannel audio signals in the pre-processing stage to remove inter-channel redundancy. Then, after processing by several audio coding blocks, the transform coefficients are quantized in layers and the bit stream is ordered according to importance. The multichannel audio bit stream generated by the proposed algorithm has a fully progressive property, which is highly desirable for audio multicast applications in heterogeneous networks. Experimental results show that, compared with the MPEG Advanced Audio Coding (AAC) algorithm, the proposed algorithm achieves better performance in both the objective MNR (Mask-to-Noise-Ratio) measurement and the subjective listening test at several different bit rates.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
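The pre-processing step described above, a Karhunen-Loeve transform across channels to remove inter-channel redundancy, can be sketched directly in NumPy. Channel count, shapes, and data below are placeholder assumptions, not the paper's implementation.

```python
# Sketch of a cross-channel KLT for multichannel audio decorrelation.
import numpy as np

def channel_klt(x):
    """x: (channels, samples) multichannel audio. Returns the decorrelating
    matrix (eigenvectors of the inter-channel covariance) and the
    transformed signal."""
    x = x - x.mean(axis=1, keepdims=True)
    cov = (x @ x.T) / x.shape[1]               # channels x channels covariance
    eigvals, eigvecs = np.linalg.eigh(cov)     # ascending eigenvalues
    klt = eigvecs[:, ::-1].T                   # rows = principal directions
    return klt, klt @ x

rng = np.random.default_rng(1)
audio = rng.standard_normal((5, 48000))        # e.g. 5 channels, 1 s at 48 kHz
klt, decorrelated = channel_klt(audio)
print(np.round(np.cov(decorrelated), 3))       # near-diagonal covariance
```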
Waese, Jamie; Debevec, Paul
A Real Time High Dynamic Range Light Probe Proceedings Article
In: SIGGRAPH Technical Sketches, 2001.
@inproceedings{waese_real_2001,
title = {A Real Time High Dynamic Range Light Probe},
author = {Jamie Waese and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Real%20Time%20High%20Dynamic%20Range%20Light%20Probe.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
A Cluster Centroid Method for Room Response Equalization at Multiple Locations Proceedings Article
In: IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics, pp. 55–58, New Paltz, NY, 2001, ISBN: 0-7803-7126-7.
@inproceedings{bharitkar_cluster_2001,
title = {A Cluster Centroid Method for Room Response Equalization at Multiple Locations},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/A%20CLUSTER%20CENTROID%20METHOD%20FOR%20ROOM%20RESPONSE%20EQUALIZATION%20AT%20MULTIPLE%20LOCATIONS.pdf},
isbn = {0-7803-7126-7},
year = {2001},
date = {2001-01-01},
booktitle = {IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics},
pages = {55–58},
address = {New Paltz, NY},
abstract = {In this paper we address the problem of simultaneous room response equalization for multiple listeners. Traditional approaches to this problem have used a single microphone at the listening position to measure impulse responses from a loudspeaker and then use an inverse filter to correct the frequency response. The problem with that approach is that it only works well for that one point and in most cases is not practical even for one listener with a typical ear spacing of 18 cm. It does not work at all for other listeners in the room, or if the listener changes positions even slightly. We propose a new approach that is based on the Fuzzy c-means clustering technique. We use this method to design equalization filters and demonstrate that we can achieve better equalization performance for several locations in the room simultaneously as compared to single point or simple averaging methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
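A minimal version of the idea, clustering the measured room responses with fuzzy c-means and inverting a cluster centroid rather than a single-point measurement, is sketched below. The fuzzy c-means implementation is generic, the responses are random toys, and the magnitude-only inversion glosses over the phase and stability issues a real equalizer must handle.

```python
# Toy cluster-centroid equalization sketch (not the paper's implementation).
import numpy as np

def fuzzy_cmeans(X, c=2, m=2.0, iters=100):
    """X: (n_points, dims). Returns (centroids, membership matrix U)."""
    rng = np.random.default_rng(0)
    U = rng.random((c, len(X)))
    U /= U.sum(axis=0)
    for _ in range(iters):
        W = U ** m
        centroids = (W @ X) / W.sum(axis=1, keepdims=True)
        d = np.linalg.norm(X[None, :, :] - centroids[:, None, :], axis=2) + 1e-12
        U = 1.0 / (d ** (2 / (m - 1)))
        U /= U.sum(axis=0)
    return centroids, U

# Toy magnitude responses measured at 4 listening positions (64 freq bins).
rng = np.random.default_rng(2)
responses = np.abs(rng.standard_normal((4, 64))) + 0.5
centroids, _ = fuzzy_cmeans(responses, c=1)          # one prototype response
eq_filter = 1.0 / centroids[0]                       # magnitude-only inverse
print((responses * eq_filter).mean(axis=1))          # roughly flattened
```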
Srinivasamurthy, Naveen; Narayanan, Shrikanth; Ortega, Antonio
Use of Model Transformations for Distributed Speech Recognition Proceedings Article
In: 4th ISCA Tutorial and Research Workshop on Speech Synthesis, pp. 113–116, Sophia Antipolis, France, 2001.
@inproceedings{srinivasamurthy_use_2001,
title = {Use of Model Transformations for Distributed Speech Recognition},
author = {Naveen Srinivasamurthy and Shrikanth Narayanan and Antonio Ortega},
url = {http://ict.usc.edu/pubs/Use%20of%20Model%20Transformations%20for%20Distributed%20Speech%20Recognition.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {4th ISCA Tutorial and Research Workshop on Speech Synthesis},
pages = {113–116},
address = {Sophia Antipolis, France},
abstract = {Due to bandwidth limitations, the speech recognizer in distributed speech recognition (DSR) applications has to use encoded speech - either traditional speech encoding or speech encoding optimized for recognition. The penalty incurred in reducing the bitrate is degradation in speech recognition performance. The diversity of the applications using DSR implies that a variety of speech encoders can be used to compress speech. By treating the encoder variability as a mismatch we propose using model transformation to reduce the speech recognition performance degradation. The advantage of using model transformation is that only a single model set needs to be trained at the server, which can be adapted on the fly to the input speech data. We were able to reduce the word error rate by 61.9%, 63.3% and 56.3% for MELP, GSM and MFCC-encoded data, respectively, by using MAP adaptation, which shows the generality of our proposed scheme.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
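The abstract's model transformation via MAP adaptation boils down, for a single Gaussian mean, to interpolating between the prior mean and the sample mean of the codec-specific adaptation data. The sketch below shows just that update with invented dimensions and data; a real acoustic model adapts many HMM mixture components, not one vector.

```python
# MAP adaptation of a single Gaussian mean (stand-in for a full acoustic model).
import numpy as np

def map_adapt_mean(prior_mean, adaptation_frames, tau=10.0):
    """Classical MAP update: a weighted interpolation between the prior
    (codec-independent) mean and the sample mean of the adaptation data,
    with tau controlling how strongly the prior is trusted."""
    n = len(adaptation_frames)
    sample_mean = np.mean(adaptation_frames, axis=0)
    return (tau * prior_mean + n * sample_mean) / (tau + n)

prior = np.zeros(13)                                   # e.g. 13 MFCC dimensions
rng = np.random.default_rng(3)
codec_frames = rng.normal(loc=0.5, scale=1.0, size=(200, 13))
print(np.round(map_adapt_mean(prior, codec_frames), 2))
```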
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
@inproceedings{damiano_anticipatory_2001,
title = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
author = {Rossana Damiano and David Traum},
url = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Proceedings Article
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
@inproceedings{marsella_modeling_2001,
title = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
address = {Edinburgh, Scotland},
abstract = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Adaptive Karhunen-Loeve Transform for Enhanced Multichannel Audio Coding Proceedings Article
In: SPIE, San Diego, CA, 2001.
@inproceedings{yang_adaptive_2001,
title = {Adaptive Karhunen-Loeve Transform for Enhanced Multichannel Audio Coding},
author = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C. -C. Jay Kuo},
url = {http://ict.usc.edu/pubs/Adaptive%20Karhunen-Loeve%20Transform%20for%20Enhanced%20Multichannel%20Audio%20Coding.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SPIE},
address = {San Diego, CA},
abstract = {A modified MPEG Advanced Audio Coding (AAC) scheme based on the Karhunen-Loeve transform (KLT) to remove inter-channel redundancy, called the MAACKL method, was proposed in our previous work. However, a straightforward coding of the elements of the KLT matrix generates about 240 bits per matrix for typical 5-channel audio content. Such an overhead is too expensive and prevents MAACKL from updating the KLT dynamically over short periods of time. In this research, we study the de-correlation efficiency of adaptive KLT as well as an efficient way to encode elements of the KLT matrix via vector quantization. The effect of different quantization accuracies and adaptation periods is examined carefully. It is demonstrated that with the smallest possible number of bits per matrix and a moderately long KLT adaptation time, the MAACKL algorithm can still deliver very good coding performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
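To make the bit-overhead argument concrete, the sketch below vector-quantizes the rows of a KLT matrix against a small trained codebook, so only codebook indices need to be transmitted per update. The codebook size, training data, and per-row grouping are assumptions for illustration; the paper's actual quantizer design is not reproduced.

```python
# Toy vector quantization of KLT matrix rows via a plain k-means codebook.
import numpy as np

def train_codebook(vectors, size=8, iters=50):
    """Plain k-means codebook over the training vectors."""
    rng = np.random.default_rng(0)
    codebook = vectors[rng.choice(len(vectors), size, replace=False)]
    for _ in range(iters):
        idx = np.argmin(((vectors[:, None, :] - codebook[None]) ** 2).sum(-1), axis=1)
        for k in range(size):
            if np.any(idx == k):
                codebook[k] = vectors[idx == k].mean(axis=0)
    return codebook

def quantize(vectors, codebook):
    idx = np.argmin(((vectors[:, None, :] - codebook[None]) ** 2).sum(-1), axis=1)
    return idx, codebook[idx]

# Treat each row of a 5x5 KLT matrix as one vector to be quantized.
rng = np.random.default_rng(4)
klt_rows = np.linalg.qr(rng.standard_normal((5, 5)))[0]   # orthonormal rows
training = np.vstack([np.linalg.qr(rng.standard_normal((5, 5)))[0] for _ in range(40)])
codebook = train_codebook(training, size=8)
indices, reconstructed = quantize(klt_rows, codebook)
print(indices, np.round(np.abs(klt_rows - reconstructed).max(), 2))
```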
Sadek, Ramy; Miraglia, Dave; Morie, Jacquelyn
3D Sound Design and Technology for the Sensory Environments Evaluations Project: Phase 1 Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 01.2001, 2001.
@techreport{sadek_3d_2001,
title = {3D Sound Design and Technology for the Sensory Environments Evaluations Project: Phase 1},
author = {Ramy Sadek and Dave Miraglia and Jacquelyn Morie},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2001.pdf},
year = {2001},
date = {2001-01-01},
number = {ICT TR 01.2001},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Debevec, Paul; Hawkins, Tim; Tchou, Chris; Duiker, Haarm-Pieter; Sarokin, Westley
Acquiring the Reflectance Field of a Human Face Proceedings Article
In: SIGGRAPH, New Orleans, LA, 2000.
@inproceedings{debevec_acquiring_2000,
title = {Acquiring the Reflectance Field of a Human Face},
author = {Paul Debevec and Tim Hawkins and Chris Tchou and Haarm-Pieter Duiker and Westley Sarokin},
url = {http://ict.usc.edu/pubs/Acquiring%20the%20Re%EF%AC%82ectance%20Field%20of%20a%20Human%20Face.pdf},
year = {2000},
date = {2000-07-01},
booktitle = {SIGGRAPH},
address = {New Orleans, LA},
abstract = {We present a method to acquire the reflectance field of a human face and use these measurements to render the face under arbitrary changes in lighting and viewpoint. We first acquire images of the face from a small set of viewpoints under a dense sampling of incident illumination directions using a light stage. We then construct a reflectance function image for each observed image pixel from its values over the space of illumination directions. From the reflectance functions, we can directly generate images of the face from the original viewpoints in any form of sampled or computed illumination. To change the viewpoint, we use a model of skin reflectance to estimate the appearance of the reflectance functions for novel viewpoints. We demonstrate the technique with synthetic renderings of a person's face under novel illumination and viewpoints.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
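The per-pixel formulation in the abstract, where every pixel stores a reflectance function over incident directions, makes relighting a single dot product per pixel. The sketch below shows that operation with placeholder arrays; resolution, direction count, and the light-probe sampling are assumptions.

```python
# Relighting from per-pixel reflectance functions (toy shapes and data).
import numpy as np

def relight_from_reflectance(reflectance, illumination):
    """reflectance: (H, W, N) response of each pixel to each of N directions.
    illumination: (N,) incident intensity per direction (e.g. sampled from a
    light probe). Returns the (H, W) relit image."""
    return reflectance @ illumination

rng = np.random.default_rng(5)
R = rng.random((4, 4, 64))          # toy 4x4 image, 64 incident directions
L = rng.random(64) / 64.0
print(relight_from_reflectance(R, L).shape)   # (4, 4)
```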
Bharitkar, Sunil; Kyriakakis, Chris
Selective Signal Cancellation for Multiple Listener Audio Applications: An Information Theory Approach Proceedings Article
In: IEEE International Conference Multimedia and Expo, New York, NY, 2000.
@inproceedings{bharitkar_selective_2000,
title = {Selective Signal Cancellation for Multiple Listener Audio Applications: An Information Theory Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/SELECTIVE%20SIGNAL%20CANCELLATION%20FOR%20MULTIPLE-LISTENER%20AUDIO%20APPLICATIONS-%20AN%20INFORMATION%20THEORY%20APPROACH.pdf},
year = {2000},
date = {2000-07-01},
booktitle = {IEEE International Conference Multimedia and Expo},
address = {New York, NY},
abstract = {Selectively canceling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, teleconferencing, office, industrial and other applications. The traditional noise cancellation approach is impractical for such applications because it requires sensors that must be placed on the listeners. In this paper we propose an alternative method to minimize signal power in a given location and maximize signal power in another location of interest. A key advantage of this approach is that it eliminates the need for such sensors. We investigate the use of an information theoretic criterion known as mutual information to design filter coefficients that selectively cancel a signal in one audio channel and transmit it in another (complementary) channel. Our results show an improvement in power gain at one location in the room relative to the other.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
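The paper's criterion is mutual information; as a simpler, loosely related stand-in, the sketch below chooses filter coefficients that maximize the ratio of output power at a "keep" location to output power at a "cancel" location, which reduces to a generalized eigenvalue problem. Room responses, filter order, and signals are random toys.

```python
# Power-ratio stand-in for selective cancellation (not the paper's MI criterion).
import numpy as np
from scipy.linalg import eigh, toeplitz
from scipy.signal import lfilter

def correlation_matrix(h, x, order):
    """Autocorrelation matrix of the signal reaching a location with response h."""
    y = lfilter(h, [1.0], x)
    r = np.correlate(y, y, mode="full")[len(y) - 1:len(y) - 1 + order] / len(y)
    return toeplitz(r)

rng = np.random.default_rng(6)
x = rng.standard_normal(8000)                 # source signal
h_keep, h_cancel = rng.standard_normal(64), rng.standard_normal(64)
order = 32
R_keep = correlation_matrix(h_keep, x, order)
R_cancel = correlation_matrix(h_cancel, x, order) + 1e-6 * np.eye(order)
vals, vecs = eigh(R_keep, R_cancel)           # generalized eigenvalue problem
w = vecs[:, -1]                               # filter maximizing the power ratio
print(round(vals[-1], 2))                     # achieved keep/cancel power ratio
```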
Gratch, Jonathan
Human-like behavior, alas, demands human-like intellect Proceedings Article
In: Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents, Barcelona, Spain, 2000.
@inproceedings{gratch_human-like_2000,
title = {Human-like behavior, alas, demands human-like intellect},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Human-like%20behavior%20alas%20demands%20human-like%20intellect.pdf},
year = {2000},
date = {2000-06-01},
booktitle = {Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents},
address = {Barcelona, Spain},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mouchtaris, Athanasios; Reveliotis, Panagiotis; Kyriakakis, Chris
Inverse Filter Design for Immersive Audio Rendering Over Loudspeakers Journal Article
In: IEEE Transactions on Multimedia, vol. 2, no. 2, pp. 77–87, 2000.
@article{moutchtaris_inverse_2000,
title = {Inverse Filter Design for Immersive Audio Rendering Over Loudspeakers},
author = {Athanasios Mouchtaris and Panagiotis Reveliotis and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Inverse%20Filter%20Design%20for%20Immersive%20Audio%20Rendering%20Over%20Loudspeakers.pdf},
year = {2000},
date = {2000-06-01},
journal = {IEEE Transactions on Multimedia},
volume = {2},
number = {2},
pages = {77–87},
abstract = {Immersive audio systems can be used to render virtual sound sources in three-dimensional (3-D) space around a listener. This is achieved by simulating the head-related transfer function (HRTF) amplitude and phase characteristics using digital filters. In this paper, we examine certain key signal processing considerations in spatial sound rendering over headphones and loudspeakers. We address the problem of crosstalk inherent in loudspeaker rendering and examine two methods for implementing crosstalk cancellation and loudspeaker frequency response inversion in real time. We demonstrate that it is possible to achieve crosstalk cancellation of 30 dB using both methods, but one of the two (the Fast RLS Transversal Filter Method) offers a significant advantage in terms of computational efficiency. Our analysis is easily extendable to nonsymmetric listening positions and moving listeners.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
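The core of the loudspeaker crosstalk cancellation discussed above is inverting, at every frequency bin, the 2x2 matrix of transfer functions from the two loudspeakers to the two ears. The sketch below does that naive regularized inversion on toy impulse responses; the paper's real-time RLS-based implementations and listener-position handling are not modeled.

```python
# Naive frequency-domain crosstalk canceller sketch (toy HRTFs, no causality handling).
import numpy as np

def crosstalk_canceller(h_ll, h_lr, h_rl, h_rr, n_fft=512, eps=1e-3):
    """h_xy: impulse response from loudspeaker x to ear y.
    Returns the 2x2 inverse-filter matrix at every frequency bin."""
    H = np.empty((n_fft, 2, 2), dtype=complex)
    H[:, 0, 0] = np.fft.fft(h_ll, n_fft)   # left speaker  -> left ear
    H[:, 0, 1] = np.fft.fft(h_rl, n_fft)   # right speaker -> left ear
    H[:, 1, 0] = np.fft.fft(h_lr, n_fft)   # left speaker  -> right ear
    H[:, 1, 1] = np.fft.fft(h_rr, n_fft)   # right speaker -> right ear
    return np.linalg.inv(H + eps * np.eye(2))   # regularized per-bin inverse

rng = np.random.default_rng(7)
C = crosstalk_canceller(*[rng.standard_normal(128) * np.exp(-np.arange(128) / 20)
                          for _ in range(4)])
print(C.shape)   # (512, 2, 2)
```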
Hill, Randall W.; Gratch, Jonathan; Rosenbloom, Paul
Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, Barcelona, Spain, 2000.
@inproceedings{hill_flexible_2000,
title = {Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces},
author = {Randall W. Hill and Jonathan Gratch and Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Flexible%20Group%20Behavior-%20Virtual%20Commanders%20for%20Synthetic%20Battlespaces.pdf},
year = {2000},
date = {2000-06-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
address = {Barcelona, Spain},
abstract = {This paper describes a project to develop autonomous commander agents for synthetic battlespaces. The commander agents plan missions, monitor their execution, and replan when necessary. To reason about the social aspects of group behavior, the commanders take various social stances that enable them to collaborate with friends, exercise or defer to authority, and thwart their foes. The purpose of this paper is to describe these capabilities and how they came to be through a series of lessons learned while developing autonomous agents for this domain.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Proceedings Article
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
@inproceedings{kim_how_2000,
title = {How Long Can an Agent Look Away From a Target?},
author = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
year = {2000},
date = {2000-05-01},
booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no single theory of SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot who visually tracks the location and direction of several vehicles that he cannot see simultaneously must shift his visual field of view to scan the environment and sense the situation. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. For SA, it is important that a virtual pilot with a limited visual field of view gather information from its environment and choose appropriate actions without losing the target.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
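A toy reading of the title question: if the target can move at most v_max while unobserved, the agent can look away until the worst-case position uncertainty outgrows the region it can reacquire in one glance. The numbers and the linear-growth assumption below are invented for illustration and are not the paper's perceptual-coordination model.

```python
# Toy bound on look-away time under a worst-case linear uncertainty growth model.
def max_look_away_time(reacquire_radius_m, v_max_mps, last_fix_error_m=0.0):
    """Time until worst-case position uncertainty exceeds the reacquisition
    radius, assuming uncertainty grows linearly at the target's top speed."""
    slack = reacquire_radius_m - last_fix_error_m
    return max(slack, 0.0) / v_max_mps

print(max_look_away_time(reacquire_radius_m=200.0, v_max_mps=15.0))  # ~13.3 s
```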
Georgiou, Panayiotis G.; Kyriakakis, Chris
A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time Proceedings Article
In: Proceedings of ICME 2000, New York, NY, 2000.
@inproceedings{georgiou_multiple_2000,
title = {A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/A%20MULTIPLE%20INPUT%20SINGLE%20OUTPUT%20MODEL%20FOR%20RENDERING%20VIRTUAL%20SOUND%20SOURCES%20IN%20REAL%20TIME.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of ICME 2000},
address = {New York, NY},
abstract = {Accurate localization of sound in 3-D space is based on variations in the spectrum of sound sources. These variations arise mainly from reflection and diffraction effects caused by the pinnae and are described through a set of Head-Related Transfer Functions (HRTFs) that are unique for each azimuth and elevation angle. A virtual sound source can be rendered in the desired location by filtering with the corresponding HRTF for each ear. Previous work on HRTF modeling has mainly focused on methods that attempt to model each transfer function individually. These methods are generally computationally complex and cannot be used for real-time spatial rendering of multiple moving sources. In this work we provide an alternative approach, which uses a multiple input single output state-space system to create a combined model of the HRTFs for all directions. This method exploits the similarities among the different HRTFs to achieve a significant reduction in the model size with a minimum loss of accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
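The combined model described above is a multiple-input single-output state-space filter: one shared state, one input per virtual source direction, and a single output per ear. The simulation loop below shows the general form with random placeholder matrices rather than a fitted HRTF model.

```python
# MISO state-space rendering sketch (random placeholder system, not fitted HRTFs).
import numpy as np

def miso_state_space(A, B, C, D, U):
    """A: (s, s), B: (s, m), C: (s,), D: (m,); U: (T, m) inputs, one column per
    virtual source direction. Returns the (T,) output for one ear."""
    x = np.zeros(A.shape[0])
    y = np.empty(len(U))
    for n, u in enumerate(U):
        y[n] = C @ x + D @ u      # output sample for this ear
        x = A @ x + B @ u         # shared state update
    return y

rng = np.random.default_rng(8)
s, m, T = 16, 8, 1000                                     # state size, directions, samples
A = 0.9 * np.linalg.qr(rng.standard_normal((s, s)))[0]    # stable dynamics (spectral radius 0.9)
B = rng.standard_normal((s, m))
C, D = rng.standard_normal(s), rng.standard_normal(m)
U = np.zeros((T, m)); U[:, 3] = rng.standard_normal(T)    # source active in one direction
print(miso_state_space(A, B, C, D, U).shape)              # (1000,)
```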
Gratch, Jonathan
Èmile: Marshalling Passions in Training and Education Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
@inproceedings{gratch_emile_2000,
title = {Èmile: Marshalling Passions in Training and Education},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
pages = {325–332},
address = {Barcelona, Spain},
abstract = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes Èmile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
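A tiny illustration of plan-based appraisal in the spirit of the abstract: the emotional significance of an event is scored from how much it changes the probability of achieving a goal, weighted by the goal's importance. The labels and thresholds are invented for the sketch and are not Èmile's actual rules.

```python
# Toy plan-based appraisal: goal importance times change in success probability.
def appraise(goal_importance, p_success_before, p_success_after):
    desirability = goal_importance * (p_success_after - p_success_before)
    if desirability > 0:
        return "hope/joy", desirability
    if desirability < 0:
        return "fear/distress", desirability
    return "neutral", 0.0

# An event that drops a vital goal's success probability from 0.8 to 0.3.
print(appraise(goal_importance=10.0, p_success_before=0.8, p_success_after=0.3))
```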