Publications
Search
Malandrakis, Nikolaos; Potamianos, Alexandros; Hsu, Kean J.; Babeva, Kalina N.; Feng, Michelle C.; Davison, Gerald C.; Narayanan, Shrikanth
AFFECTIVE LANGUAGE MODEL ADAPTATION VIA CORPUS SELECTION Proceedings Article
In: proceedings of ICASSP, Florence, Italy, 2014.
@inproceedings{malandrakis_affective_2014,
title = {Affective Language Model Adaptation via Corpus Selection},
author = {Nikolaos Malandrakis and Alexandros Potamianos and Kean J. Hsu and Kalina N. Babeva and Michelle C. Feng and Gerald C. Davison and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Affective%20language%20model%20adaptation%20via%20corpus%20selection.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of {ICASSP}},
address = {Florence, Italy},
abstract = {Motivated by methods used in language modeling and grammar induction, we propose the use of pragmatic constraints and perplexity as criteria to filter the unlabeled data used to generate the semantic similarity model. We investigate unsupervised adaptation algorithms of the semantic-affective models proposed in [1, 2]. Affective ratings at the utterance level are generated based on an emotional lexicon, which in turn is created using a semantic (similarity) model estimated over raw, unlabeled text. The proposed adaptation method creates task-dependent semantic similarity models and task- dependent word/term affective ratings. The proposed adaptation algorithms are tested on anger/distress detection of transcribed speech data and sentiment analysis in tweets showing significant relative classification error reduction of up to 10%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Vaz, Colin; Tsiartas, Andreas; Narayanan, Shrikanth
ENERGY-CONSTRAINED MINIMUM VARIANCE RESPONSE FILTER FOR ROBUST VOWEL SPECTRAL ESTIMATION Proceedings Article
In: Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on, pp. 6275–6279, IEEE, 2014.
@inproceedings{vaz_energy-constrained_2014,
title = {Energy-Constrained Minimum Variance Response Filter for Robust Vowel Spectral Estimation},
author = {Colin Vaz and Andreas Tsiartas and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Energy-Constrained%20Minimum%20Variance%20Response%20Filter%20for%20Robust%20Vowel%20Spectral%20Estimation.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Acoustics, Speech and Signal Processing ({ICASSP}), 2014 {IEEE} International Conference on},
pages = {6275--6279},
publisher = {IEEE},
abstract = {We propose the energy-constrained minimum-variance response (ECMVR) filter to perform robust spectral estimation of vowels. We modify the distortionless constraint of the minimum-variance distortionless response (MVDR) filter and add an energy constraint to its formulation to mitigate the influence of noise on the speech spectrum. We test our ECMVR filter on a vowel classification task with different background noises at various SNR levels. Results show that vowels are classified more accurately in certain noises using MFCC and PLP features extracted from the ECMVR spectrum compared to using features extracted from the FFT and MVDR spectra. Index Terms: frequency estimation, MVDR, robust signal processing, spectral estimation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Degottex, Gilles; Kane, John; Drugman, Thomas; Raitio, Tuomo; Scherer, Stefan
COVAREP - A collaborative voice analysis repository for speech technologies Proceedings Article
In: Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2014), pp. 960–964, IEEE, Florence, Italy, 2014.
@inproceedings{degottex_covarep_2014,
title = {{COVAREP} - A Collaborative Voice Analysis Repository for Speech Technologies},
author = {Gilles Degottex and John Kane and Thomas Drugman and Tuomo Raitio and Stefan Scherer},
url = {http://ict.usc.edu/pubs/COVAREP%20%e2%80%93%20A%20COLLABORATIVE%20VOICE%20ANALYSIS%20REPOSITORY%20FOR%20SPEECH%20TECHNOLOGIES.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP} 2014)},
pages = {960--964},
publisher = {IEEE},
address = {Florence, Italy},
abstract = {Speech processing algorithms are often developed demonstrating improvements over the state-of-the-art, but sometimes at the cost of high complexity. This makes algorithm reimplementations based on literature difficult, and thus reliable comparisons between published results and current work are hard to achieve. This paper presents a new collaborative and freely available repository for speech processing algorithms called COVAREP, which aims at fast and easy access to new speech processing algorithms and thus facilitating research in the field. We envisage that COVAREP will allow more reproducible research by strengthening complex implementations through shared contributions and openly available code which can be discussed, commented on and corrected by the community. Presently COVAREP contains contributions from five distinct laboratories and we encourage contributions from across the speech processing research field. In this paper, we provide an overview of the current offerings of COVAREP and also include a demonstration of the algorithms through an emotion classification experiment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Stratou, Giota; Shapiro, Ari; Morency, Louis-Philippe; Scherer, Stefan
An Interactive Virtual Audience Platform for Public Speaking Training Proceedings Article
In: Proceedings of International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 1657–1658, ACM, Paris, France, 2014.
@inproceedings{chollet_interactive_2014,
title = {An Interactive Virtual Audience Platform for Public Speaking Training},
author = {Mathieu Chollet and Giota Stratou and Ari Shapiro and Louis-Philippe Morency and Stefan Scherer},
url = {http://ict.usc.edu/pubs/An%20Interactive%20Virtual%20Audience%20Platform%20for%20Public%20Speaking%20Training.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
pages = {1657--1658},
publisher = {ACM},
address = {Paris, France},
abstract = {We have developed an interactive virtual audience platform for public speaking training. Users' public speaking behavior is automatically analyzed using audiovisual sensors. The virtual characters display indirect feedback depending on user's behavior descriptors correlated with public speaking performance. We used the system to collect a dataset of public speaking performances in different training conditions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Artstein, Ron; Benn, Grace; Dey, Teresa; Fast, Edward; Gainer, Alesia; Georgila, Kallirroi; Gratch, Jonathan; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Marsella, Stacy C.; Fabrizio, Morbini; Nazarian, Angela; Scherer, Stefan; Stratou, Giota; Suri, Apar; Traum, David; Wood, Rachel; Xu, Yuyu; Rizzo, Albert; Morency, Louis-Philippe
SimSensei Kiosk: A Virtual Human Interviewer for Healthcare Decision Support Proceedings Article
In: Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2014), pp. 1061–1068, International Foundation for Autonomous Agents and Multiagent Systems, Paris, France, 2014.
@inproceedings{devault_simsensei_2014,
title = {{SimSensei Kiosk}: A Virtual Human Interviewer for Healthcare Decision Support},
author = {David DeVault and Ron Artstein and Grace Benn and Teresa Dey and Edward Fast and Alesia Gainer and Kallirroi Georgila and Jonathan Gratch and Arno Hartholt and Margaux Lhommet and Gale Lucas and Stacy C. Marsella and Fabrizio Morbini and Angela Nazarian and Stefan Scherer and Giota Stratou and Apar Suri and David Traum and Rachel Wood and Yuyu Xu and Albert Rizzo and Louis-Philippe Morency},
url = {https://dl.acm.org/citation.cfm?id=2617415},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems ({AAMAS} 2014)},
pages = {1061--1068},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Paris, France},
abstract = {We present SimSensei Kiosk, an implemented virtual human interviewer designed to create an engaging face-to-face interaction where the user feels comfortable talking and sharing information. SimSensei Kiosk is also designed to create interactional situations favorable to the automatic assessment of distress indicators, defined as verbal and nonverbal behaviors correlated with depression, anxiety or post-traumatic stress disorder (PTSD). In this paper, we summarize the design methodology, performed over the past two years, which is based on three main development cycles: (1) analysis of face-to-face human interactions to identify potential distress indicators, dialogue policies and virtual human gestures, (2) development and analysis of a Wizard-of-Oz prototype system where two human operators were deciding the spoken and gestural responses, and (3) development of a fully automatic virtual interviewer able to engage users in 15-25 minute interactions. We show the potential of our fully automatic virtual human interviewer in a user study, and situate its performance in relation to the Wizard-of-Oz prototype.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kumar, Naveen; Segbroeck, Maarten Van; Audhkhasi, Kartik; Drotár, Peter; Narayanan, Shrikanth S.
Fusion of Diverse Denoising Systems for Robust Automatic Speech Recognition Proceedings Article
In: Proceedings of IEEE International Conference on Audio, Speech and Signal Processing (ICASSP), 2014.
@inproceedings{kumar_fusion_2014,
title = {Fusion of Diverse Denoising Systems for Robust Automatic Speech Recognition},
author = {Naveen Kumar and Maarten {Van Segbroeck} and Kartik Audhkhasi and Peter Drotár and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/Fusion%20of%20Diverse%20denoising%20systems%20for%20Robust%20Automatic%20Speech%20Recognition.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
abstract = {We present a framework for combining different denoising front-ends for robust speech enhancement for recognition in noisy conditions. This is contrasted against results of optimally fusing diverse parameter settings for a single denoising algorithm. All frontends in the latter case exploit the same denoising algorithm, which combines harmonic decomposition, with noise estimation and spectral subtraction. The set of associated parameters involved in these steps are dependent on the noise conditions. Rather than explicitly tuning them, we suggest a strategy that tries to account for the trade-off between average word error rate and diversity to find an optimal subset of these parameter settings. We present the results on Aurora4 database and also compare against traditional speech enhancement methods e.g. Wiener filtering and spectral subtraction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Can, Dogan; Gibson, James; Vaz, Colin; Georgiou, Panayiotis G.; Narayanan, Shrikanth S.
Barista: A framework for concurrent speech processing by usc-sail Proceedings Article
In: Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on, pp. 3306–3310, IEEE, 2014.
@inproceedings{can_barista_2014,
title = {{Barista}: A Framework for Concurrent Speech Processing by {USC-SAIL}},
author = {Dogan Can and James Gibson and Colin Vaz and Panayiotis G. Georgiou and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/Barista%20-%20A%20Framework%20for%20Concurrent%20Speech%20Processing%20by%20USC-SAIL.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Acoustics, Speech and Signal Processing ({ICASSP}), 2014 {IEEE} International Conference on},
pages = {3306--3310},
publisher = {IEEE},
abstract = {We present Barista, an open-source framework for concurrent speech processing based on the Kaldi speech recognition toolkit and the libcppa actor library. With Barista, we aim to provide an easy-to-use, extensible framework for constructing highly customizable concurrent (and/or distributed) networks for a variety of speech processing tasks. Each Barista network specifies a flow of data between simple actors, concurrent entities communicating by message passing, modeled after Kaldi tools. Leveraging the fast and reliable concurrency and distribution mechanisms provided by libcppa, Barista lets demanding speech processing tasks, such as real-time speech recognizers and complex training workflows, to be scheduled and executed on parallel (and/or distributed) hardware. Barista is released under the Apache License v2.0.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Kang, Sin-Hwa; Bolas, Mark
Virtual Coaches over Mobile Video Proceedings Article
In: Proceedings of International Conference on Computer Animation and Social Agents (CASA), 2014.
@inproceedings{krum_virtual_2014,
title = {Virtual Coaches over Mobile Video},
author = {David M. Krum and Sin-Hwa Kang and Mark Bolas},
url = {http://ict.usc.edu/pubs/Virtual%20Coaches%20over%20Mobile%20Video.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of International Conference on Computer Animation and Social Agents ({CASA})},
abstract = {We hypothesize that the context of a smartphone, how a virtual human is presented within a smartphone app, and indeed, the nature of that app, can profoundly affect how the virtual human is perceived by a real human. We believe that virtual humans, presented over video chat services (such as Skype) and delivered using mobile phones, can be an effective way to deliver coaching applications. We propose to build a prototype system that allows virtual humans to initiate and receive Skype calls. This hardware will enable broadcast of the audio and video imagery of a character. Using this platform and a virtual human, we will conduct two user studies. The first study will examine factors involved in making a mobile video based character seem engaging and “real”. This study will examine how character appearance and the artifacts of the communication channel, such as video and audio quality, can affect rapport with a virtual human. The second study will examine ways to maintain a long-term relationship with a character, leveraging the character’s ability to call and interact with a real human over a longer period of time. These studies will help develop design guidelines for presenting virtual humans over mobile video.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miller, Chreston; Quek, Francis; Morency, Louis-Philippe
Search Strategies for Pattern Identification in Multimodal Data: Three Case Studies Proceedings Article
In: pp. 273–280, ACM Press, 2014, ISBN: 978-1-4503-2782-4.
@inproceedings{miller_search_2014,
title = {Search Strategies for Pattern Identification in Multimodal Data: Three Case Studies},
author = {Chreston Miller and Francis Quek and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Search%20Strategies%20for%20Pattern%20Identification%20in%20Multimodal%20Data%20Three%20Case%20Studies.pdf},
doi = {10.1145/2578726.2578761},
isbn = {978-1-4503-2782-4},
year = {2014},
date = {2014-04-01},
booktitle = {Proceedings of International Conference on Multimedia Retrieval ({ICMR})},
pages = {273--280},
publisher = {ACM Press},
abstract = {The analysis of multimodal data benefits from meaningful search and retrieval. This paper investigates strategies of searching multimodal data for event patterns. Through three longitudinal case studies, we observed researchers exploring and identifying event patterns in multimodal data. The events were extracted from different multimedia signal sources ranging from annotated video transcripts to interaction logs. Each researcher’s data has varying temporal characteristics (e.g., sparse, dense, or clustered) that posed several challenges for identifying relevant patterns. We identify unique search strategies and better understand the aspects that contributed to each.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Thomas, Jerald; Bashyal, Raghav; Goldstein, Samantha; Suma, Evan
MuVR: A Multi-user Virtual Reality Platform Proceedings Article
In: IEEE Virtual Reality 2014, pp. 115–116, IEEE, Minneapolis, Minnesota, 2014.
@inproceedings{thomas_muvr_2014,
title = {{MuVR}: A Multi-user Virtual Reality Platform},
author = {Jerald Thomas and Raghav Bashyal and Samantha Goldstein and Evan Suma},
url = {http://ict.usc.edu/pubs/MuVR%20-%20A%20Multi-user%20Virtual%20Reality%20Platform.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {{IEEE} Virtual Reality 2014},
pages = {115--116},
publisher = {IEEE},
address = {Minneapolis, Minnesota},
abstract = {Consumer adoption of virtual reality technology has historically been held back by poor accessibility, the lack of intuitive multi-user capabilities, dependence on external infrastructure for rendering and tracking, and the amount of time and effort required to enter virtual reality systems. This poster presents the current status of our work creating MuVR, a Multi-User Virtual Reality platform that seeks to overcome these hindrances. The MuVR project comprises four main goals: scalable and easy to use multi-user capabilities, portable and self-contained hardware, a rapidly deployable system, and ready accessibility to others. We provide a description of the platform we developed to address these goals and discuss potential directions for future work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shapiro, Ari; Feng, Andrew; Wang, Ruizhe; Medioni, Gerard; Bolas, Mark; Suma, Evan A.
Automatic Acquisition and Animation of Virtual Avatars Proceedings Article
In: Virtual Reality (VR), 2014 IEEE, pp. 185–186, IEEE, Minneapolis, Minnesota, 2014, ISBN: 978-1-4799-2871-2.
@inproceedings{shapiro_automatic_2014,
title = {Automatic Acquisition and Animation of Virtual Avatars},
author = {Ari Shapiro and Andrew Feng and Ruizhe Wang and Gerard Medioni and Mark Bolas and Evan A. Suma},
url = {http://ict.usc.edu/pubs/Automatic%20acquisition%20and%20animation%20of%20virtual%20avatars.pdf},
doi = {10.1109/VR.2014.6802113},
isbn = {978-1-4799-2871-2},
year = {2014},
date = {2014-03-01},
booktitle = {Virtual Reality ({VR}), 2014 {IEEE}},
pages = {185--186},
publisher = {IEEE},
address = {Minneapolis, Minnesota},
abstract = {The USC Institute for Creative Technologies will demonstrate a pipeline for automatic reconstruction and animation of lifelike 3D avatars acquired by rotating the user's body in front of a single Microsoft Kinect sensor. Based on a fusion of state-of-the-art techniques in computer vision, graphics, and animation, this approach can produce a fully rigged character model suitable for real-time virtual environments in less than four minutes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Yahata, Rhys; Bolas, Mark; Suma, Evan
An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments Proceedings Article
In: IEEE Virtual Reality 2014, pp. 65–66, 2014.
@inproceedings{azmandian_enhanced_2014,
title = {An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments},
author = {Mahdi Azmandian and Rhys Yahata and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/An%20Enhanced%20Steering%20Algorithm%20for%20Redirected%20Walking%20in%20Virtual%20Environments.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {{IEEE} Virtual Reality 2014},
pages = {65--66},
abstract = {Redirected walking techniques enable natural locomotion through immersive virtual environments that are considerably larger than the available real world walking space. However, the most effective strategy for steering the user remains an open question, as most previously presented algorithms simply redirect toward the center of the physical space. In this work, we present a theoretical framework that plans a walking path through a virtual environment and calculates the parameters for combining translation, rotation, and curvature gains such that the user can traverse a series of defined waypoints efficiently based on a utility function. This function minimizes the number of overt reorientations to avoid introducing potential breaks in presence. A notable advantage of this approach is that it leverages knowledge of the layout of both the physical and virtual environments to enhance the steering strategy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Khademi, Mahmoud; Morency, Louis-Philippe
Relative Facial Action Unit Detection Proceedings Article
In: Proceedings of the Winter conference on Applications in Computer Vision, 2014.
@inproceedings{khademi_relative_2014,
title = {Relative Facial Action Unit Detection},
author = {Mahmoud Khademi and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Relative%20Facial%20Action%20Unit%20Detection.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {Proceedings of the Winter Conference on Applications in Computer Vision},
abstract = {This paper presents a subject-independent facial action unit (AU) detection method by introducing the concept of relative AU detection, for scenarios where the neutral face is not provided. We propose a new classification objective function which analyzes the temporal neighborhood of the current frame to decide if the expression recently increased, decreased or showed no change. This approach is a significant change from the conventional absolute method which decides about AU classification using the current frame, without an explicit comparison with its neighboring frames. Our proposed method improves robustness to individual differences such as face scale and shape, age-related wrinkles, and transitions among expressions (e.g., lower intensity of expressions). Our experiments on three publicly available datasets (Extended Cohn-Kanade (CK+), Bosphorus, and DISFA databases) show significant improvement of our approach over conventional absolute techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Shoemark, Philippa; Morency, Louis-Philippe
Toward Crowdsourcing Micro-Level Behavior Annotations - The Challenges of Interface, Training, and Generalization Proceedings Article
In: Proceedings of the 19th international conference on Intelligent User Interfaces, ACM, Haifa, Israel, 2014.
@inproceedings{park_toward_2014,
title = {Toward Crowdsourcing Micro-Level Behavior Annotations - The Challenges of Interface, Training, and Generalization},
author = {Sunghyun Park and Philippa Shoemark and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Toward%20Crowdsourcing%20Micro-Level%20Behavior%20Annotations%20-%20The%20Challenges%20of%20Interface,%20Training,%20and%20Generalization.pdf},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of the 19th International Conference on Intelligent User Interfaces},
publisher = {ACM},
address = {Haifa, Israel},
abstract = {Research that involves human behavior analysis usually requires laborious and costly efforts for obtaining micro-level behavior annotations on a large video corpus. With the emerging paradigm of crowdsourcing however, these efforts can be considerably reduced. We first present OCTAB (Online Crowdsourcing Tool for Annotations of Behaviors), a web-based annotation tool that allows precise and convenient behavior annotations in videos, directly portable to popular crowdsourcing platforms. As part of OCTAB, we introduce a training module with specialized visualizations. The training module’s design was inspired by an observational study of local experienced coders, and it enables an iterative procedure for effectively training crowd workers online. Finally, we present an extensive set of experiments that evaluates the feasibility of our crowdsourcing approach for obtaining micro-level behavior annotations in videos, showing the reliability improvement in annotation accuracy when properly training online crowd workers. We also show the generalization of our training approach to a new independent video corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Gowrisankar, Rasiga; Richmond, Todd; Shapiro, Ari; Xu, Yuyu; Feng, Andrew
Mobile Personal Healthcare Mediated by Virtual Humans Proceedings Article
In: Proceedings of the companion publication of the 19th international conference on Intelligent User Interfaces, pp. 21–24, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2729-9.
@inproceedings{leuski_mobile_2014,
title = {Mobile Personal Healthcare Mediated by Virtual Humans},
author = {Anton Leuski and Rasiga Gowrisankar and Todd Richmond and Ari Shapiro and Yuyu Xu and Andrew Feng},
url = {http://dl.acm.org/citation.cfm?doid=2559184.2559200},
doi = {10.1145/2559184.2559200},
isbn = {978-1-4503-2729-9},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces},
pages = {21--24},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {We demonstrate Ally — a prototype interface for a consumer–level medical diagnostic device. It is an interactive virtual character — Virtual Human (VH) — that listens to user's concern, collects and processes sensor data, offers advice, guides the user through a self-administered medical tests, and answers the user's questions. The primary focus of this demo is on the VH, we describe and demonstrate the technologies for language analysis, dialogue management, response generation and presentation. The sensing and medical decision making components are simulated in the current system, but possible applications and extensions are discussed.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Smith, Stephen; Traum, David; Alexander, Oleg; Leuski, Anton; Jones, Andrew; Georgila, Kallirroi; Debevec, Paul; Swartout, William; Maio, Heather
Time-offset Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of IUI 2014, pp. 163–168, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2184-6.
@inproceedings{artstein_time-offset_2014,
title = {Time-offset Interaction with a Holocaust Survivor},
author = {Ron Artstein and Stephen Smith and David Traum and Oleg Alexander and Anton Leuski and Andrew Jones and Kallirroi Georgila and Paul Debevec and William Swartout and Heather Maio},
url = {http://ict.usc.edu/pubs/Time-Offset%20Interaction%20with%20a%20Holocaust%20Survivor.pdf},
doi = {10.1145/2557500.2557540},
isbn = {978-1-4503-2184-6},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of {IUI} 2014},
pages = {163--168},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {Time-offset interaction is a new technology that allows for two-way communication with a person who is not available for conversation in real time: a large set of statements are prepared in advance, and users access these statements through natural conversation that mimics face-to-face interaction. Conversational reactions to user questions are retrieved through a statistical classifier, using technology that is similar to previous interactive systems with synthetic characters; however, all of the retrieved utterances are genuine statements by a real person. Recordings of answers, listening and idle behaviors, and blending techniques are used to create a persistent visual image of the person throughout the interaction. A proof-of-concept has been implemented using the likeness of Pinchas Gutter, a Holocaust survivor, enabling short conversations about his family, his religious views, and resistance. This proof-of-concept has been shown to dozens of people, from school children to Holocaust scholars, with many commenting on the impact of the experience and potential for this kind of interface.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Roemmele, Melissa; Archer-McClellan, Haley; Gordon, Andrew S.
Triangle charades: a data-collection game for recognizing actions in motion trajectories Proceedings Article
In: Proceedings of the 19th international conference on Intelligent User Interfaces, pp. 209–214, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2184-6.
@inproceedings{roemmele_triangle_2014,
title = {{Triangle Charades}: A Data-Collection Game for Recognizing Actions in Motion Trajectories},
author = {Melissa Roemmele and Haley Archer-McClellan and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=2557500.2557510},
doi = {10.1145/2557500.2557510},
isbn = {978-1-4503-2184-6},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of the 19th International Conference on Intelligent User Interfaces},
pages = {209--214},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {Humans have a remarkable tendency to anthropomorphize moving objects, ascribing to them intentions and emotions as if they were human. Early social psychology research demonstrated that animated film clips depicting the movements of simple geometric shapes could elicit rich interpretations of intentional behavior from viewers. In attempting to model this reasoning process in software, we first address the problem of automatically recognizing humanlike actions in the trajectories of moving shapes. There are two main difficulties. First, there is no defined vocabulary of actions that are recognizable to people from motion trajectories. Second, in order for an automated system to learn actions from motion trajectories using machine-learning techniques, a vast amount of these action-trajectory pairs is needed as training data. This paper describes an approach to data collection that resolves both of these problems. In a web-based game, called Triangle Charades, players create motion trajectories for actions by animating a triangle to depict those actions. Other players view these animations and guess the action they depict. An action is considered recognizable if players can correctly guess it from animations. To move towards defining a controlled vocabulary and collecting a large dataset, we conducted a pilot study in which 87 users played Triangle Charades. Based on this data, we computed a simple metric for action recognizability. Scores on this metric formed a gradual linear pattern, suggesting there is no clear cutoff for determining if an action is recognizable from motion data. These initial results demonstrate the advantages of using a game to collect data for this action recognition task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nouri, Elnaz; Traum, David
Cultural Differences in Playing Repeated Ultimatum Game online with Virtual Humans Proceedings Article
In: The 47th Annual Hawaii International Conference on System Sciences, pp. 1213–1220, Computer Society Press, Big Island of Hawaii, 2014.
@inproceedings{nouri_cultural_2014,
title = {Cultural Differences in Playing Repeated Ultimatum Game online with Virtual Humans},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Cultural%20Differences%20in%20Playing%20Repeated%20Ultimatum%20Game%20Online%20with%20Virtual%20Humans.pdf},
year = {2014},
date = {2014-01-01},
booktitle = {The 47th Annual Hawaii International Conference on System Sciences},
pages = {1213--1220},
publisher = {Computer Society Press},
address = {Big Island of Hawaii},
abstract = {Efficient interaction between computational agents and users in tasks such as negotiation and bargaining requires recognition and understanding of potential differences in human behavior. Cultural differences in humans bargaining behavior are the focus of this study. We investigate the dynamics of human game playing with a conversational computational agent (Virtual Human). We demonstrate that the cultural background influences their observed behavior in this task. We investigate whether the social values held by the participants from each culture can at least partially explain the observed differences in behavior. We show that it is possible to automatically identify players’ cultures from their game behavior and to predict their upcoming decisions in different stages of a repeated game. We employ data collected from US and Indian participants playing repeated rounds of the Ultimatum Game online against a virtual human when low stakes are involved. Our results are comparable to the reported results of similar games played among people in laboratory conditions and with high stakes. The two cultures are different in terms of the statistics and the sequence of offers made in the game and their reported values. The findings of this study are valuable for development of culturally-sensitive computational agents for negotiation and bargaining.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
A semi-automated evaluation metric for dialogue model coherence Proceedings Article
In: Fifth International Workshop on Spoken Dialogue Systems, pp. 141–150, 2014.
@inproceedings{gandhe_semi-automated_2014,
title = {A semi-automated evaluation metric for dialogue model coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/A%20semi-automated%20evaluation%20metric%20for%20dialogue%20model%20coherence.pdf},
year = {2014},
date = {2014-01-01},
booktitle = {Fifth International Workshop on Spoken Dialogue Systems},
pages = {141--150},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric Weak agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Baltrušaitis, Tadas; Robinson, Peter; Morency, Louis-Philippe
Constrained local neural fields for robust facial landmark detection in the wild Proceedings Article
In: Computer Vision Workshops (ICCVW), 2013 IEEE International Conference on, pp. 354–361, IEEE, Sydney, Australia, 2013.
@inproceedings{baltrusaitis_constrained_2013,
title = {Constrained local neural fields for robust facial landmark detection in the wild},
author = {Tadas Baltrušaitis and Peter Robinson and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Constrained%20local%20neural%20fields%20for%20robust%20facial%20landmark%20detection%20in%20the%20wild.pdf},
year = {2013},
date = {2013-12-01},
booktitle = {Computer Vision Workshops ({ICCVW}), 2013 {IEEE} International Conference on},
pages = {354--361},
publisher = {IEEE},
address = {Sydney, Australia},
abstract = {Facial feature detection algorithms have seen great progress over the recent years. However, they still struggle in poor lighting conditions and in the presence of extreme pose or occlusions. We present the Constrained Local Neural Field model for facial landmark detection. Our model includes two main novelties. First, we introduce a probabilistic patch expert (landmark detector) that can learn non-linear and spatial relationships between the input pixels and the probability of a landmark being aligned. Secondly, our model is optimised using a novel Non-uniform Regularised Landmark Mean-Shift optimisation technique, which takes into account the reliabilities of each patch expert. We demonstrate the benefit of our approach on a number of publicly available datasets over other state-of-the-art approaches when performing landmark detection in unseen lighting conditions and in the wild.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2002
Gordon, Andrew S.; Lent, Michael
Virtual Humans as Participants vs. Virtual Humans as Actors Proceedings Article
In: AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_virtual_2002,
title = {Virtual Humans as Participants vs. Virtual Humans as Actors},
author = {Andrew S. Gordon and Michael Lent},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20as%20Participants%20vs%20Virtual%20Humans%20as%20Actors.PDF},
year = {2002},
date = {2002-03-01},
booktitle = {{AAAI} Spring Symposium on Artificial Intelligence and Interactive Entertainment},
address = {Stanford University},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Enabling and recognizing strategic play in strategy games: Lessons from Sun Tzu Proceedings Article
In: The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_enabling_2002,
title = {Enabling and recognizing strategic play in strategy games: Lessons from {Sun Tzu}},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Enabling%20and%20recognizing%20strategic%20play%20in%20strategy%20games-%20Lessons%20from%20Sun%20Tzu.PDF},
year = {2002},
date = {2002-03-01},
booktitle = {The 2002 {AAAI} Spring Symposium on Artificial Intelligence and Interactive Entertainment},
address = {Stanford University},
abstract = {The interactive entertainment genre of the strategy game entertains users by allowing them to engage in strategic play, which should encourage game designers to devote development efforts toward facilitating users that wish to employ commonsense strategies, and to recognize and react to specific user strategies during game play. This paper attempts to facilitate these development efforts by identifying and analyzing 43 strategies from Sun Tzu's The Art of War, which are broadly applicable across games in the strategy game genre. For each strategy, a set of specific actions are identified that should be provided to users to enable their execution, along with generalized recognition rules that can facilitate the design of entertaining responses to users' strategic behavior. Consideration of how the enabling actions could be incorporated into an existing strategy game is provided.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Fleischman, Michael; Hovy, Eduard
Emotional Variation in Speech-Based Natural Language Generation Proceedings Article
In: International Natural Language Generation Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{fleischman_emotional_2002,
  title     = {Emotional Variation in Speech-Based Natural Language Generation},
  author    = {Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/Emotional%20Variation%20in%20Speech-Based%20Natural%20Language%20Generation.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Natural Language Generation Conference},
  abstract  = {We present a framework for handling emotional variations in a speech-based natural language system for use in the MRE virtual training environment. The system is a first step toward addressing issues in emotion-based modeling of verbal communicative behavior. We cast the problem of emotional generation as a distance minimization task, in which the system chooses between multiple valid realizations for a given input based on the emotional distance of each realization from the speaker's attitude toward that input.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Han, Changhee; Lent, Michael
Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments Proceedings Article
In: Proceedings of 14th Innovative Applications of Artificial Intelligence Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_applying_2002,
title = {Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments},
author = {Randall W. Hill and Changhee Han and Michael Lent},
url = {http://ict.usc.edu/pubs/Applying%20Perceptually%20Driven%20Cognitive%20Mapping%20To%20Virtual%20Urban%20Environments.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Proceedings of 14th Innovative Applications of Artificial Intelligence Conference},
abstract = {This paper describes a method for building a cognitive map of a virtual urban environment. Our routines enable virtual humans to map their environment using a realistic model of perception. We based our implementation on a computational framework proposed by Yeap and Jefferies (Yeap \& Jefferies 1999) for representing a local environment as a structure called an Absolute Space Representation (ASR). Their algorithms compute and update ASRs from a 2-1/2D sketch of the local environment, and then connect the ASRs together to form a raw cognitive map. Our work extends the framework developed by Yeap and Jefferies in three important ways. First, we implemented the framework in a virtual training environment, the Mission Rehearsal Exercise (Swartout et al. 2001). Second, we describe a method for acquiring a 2-1/2D sketch in a virtual world, a step omitted from their framework, but which is essential for computing an ASR. Third, we extend the ASR algorithm to map regions that are partially visible through exits of the local space. Together, the implementation of the ASR algorithm along with our extensions will be useful in a wide variety of applications involving virtual humans and agents who need to perceive and reason about spatial concepts in urban environments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_ideas_2002,
title = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_step_2002,
title = {A step toward irrationality: using emotion to change belief},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {334--341},
address = {Bologna, Italy},
abstract = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hill_anticipating_2002,
title = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
author = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {821--827},
address = {Bologna, Italy},
abstract = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Iyer, Kumar; Valanejad, R.; Sadek, Ramy; Miraglia, D.; Milam, D.
Emotionally Evocative Environments for Training Proceedings Article
In: Proceedings of the 23rd Army Science Conference, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_emotionally_2002,
title = {Emotionally Evocative Environments for Training},
author = {Jacquelyn Morie and Kumar Iyer and R. Valanejad and Ramy Sadek and D. Miraglia and D. Milam},
url = {http://ict.usc.edu/pubs/EMOTIONALLY%20EVOCATIVE%20ENVIRONMENTS%20FOR%20TRAINING.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Proceedings of the 23rd Army Science Conference},
address = {Orlando, FL},
abstract = {This paper describes a project currently in progress at the University of Southern California's Institute for Creative Technologies (ICT). Much of the research at ICT involves developing better graphics, sound and artificial intelligence to be used in creating the next generation of training tools for the United States Army. Our project focuses on the use of emotional responses as an enhancement for training. Research indicates that an emotional connection is a strong factor in how and what we remember. In addition, real world situations often evoke surprising and significant emotional reactions that soldiers must deal with. Few current immersive training scenarios, however, focus on the emotional state of the trainee, limiting training scenarios to basic objective elements. The Sensory Environments Evaluation (SEE) Project at ICT is investigating the potential of emotionally compelling environments for more effective training. We do this by skillfully combining the sensory inputs available in virtual environments. Our current efforts concentrate on sight and sound; smell will be included as scent delivery methods improve. Evaluation studies are planned to determine the effectiveness of the techniques we are developing.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2001
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation Proceedings Article
In: IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, New Paltz, New York, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2001,
title = {Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/ROBUSTNESS%20OF%20THE%20EIGENFILTER%20FOR%20VARIATIONS%20IN%20LISTENER%20RESPONSES%20FOR%20SELECTIVE%20SIGNAL%20CANCELLATION.pdf},
year = {2001},
date = {2001-10-01},
booktitle = {{IEEE} Workshop on Applications of Signal Processing to Audio and Acoustics},
address = {New Paltz, New York},
abstract = {Selectively cancelling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. We have proposed the eigenfilter for selectively cancelling signals in one direction, while attempting to retain them at unintentional directions. In this paper we investigate the behaviour of the performance measure (i.e., the gain) for a vowel and an unvoiced fricative, when the listener moves his head, in an automobile type environment. We show that in such a situation, a large energy in the difference between the impulse responses at a listener's location may affect the gain substantially. listeners in which only a subset wish to listen to the audio signal.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_adaptive_2001,
title = {Adaptive narrative: How autonomous agents, {Hollywood}, and multiprocessing operating systems can live happily ever after},
author = {Jonathan Gratch and Jay Douglas},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
doi = {10.1007/3-540-45420-9_12},
isbn = {3-540-42611-6},
year = {2001},
date = {2001-10-01},
booktitle = {Proceedings of International Conference on Virtual Storytelling},
pages = {100--112},
address = {Avignon, France},
series = {LNCS},
abstract = {Interacting Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN provide a strong basis for narrative coherence in the context of interactivity, HSP offer additional flexibility and the generation of stories and the mechanisms for generating comic situations.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff
Intelligent Virtual Agents for Education and Training: Opportunities and Challenges Proceedings Article
In: Intelligent Virtual Agents: The 3rd International Workshop, Madrid, Spain, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{rickel_intelligent_2001,
title = {Intelligent Virtual Agents for Education and Training: Opportunities and Challenges},
author = {Jeff Rickel},
url = {http://ict.usc.edu/pubs/Intelligent%20Virtual%20Agents%20for%20Education%20and%20Training-%20Opportunities%20and%20Challenges.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Intelligent Virtual Agents: The 3rd International Workshop},
address = {Madrid, Spain},
abstract = {Interactive virtual worlds provide a powerful medium for experiential learning. Intelligent virtual agents can cohabit virtual worlds with people and facilitate such learning as guides, mentors, and teammates. This paper reviews the main pedagogical advantages of animated agents in virtual worlds, discusses two key research challenges, and outlines an ambitious new project addressing those challenges.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
New Factors in Room Equalization Using a Fuzzy Logic Approach Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, New York, NY, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_new_2001,
title = {New Factors in Room Equalization Using a Fuzzy Logic Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/New%20Factors%20in%20Room%20Equalization%20Using%20a%20Fuzzy%20Logic%20Approach.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {New York, NY},
abstract = {Room acoustical modes, particularly in small rooms, cause a significant variation in the room responses measured at different locations. Responses measured only a few cm apart can vary by up to 15-20 dB at certain frequencies. This makes it difficult to equalize an audio system for multiple simultaneous listeners. Previous methods have utilized multiple microphones and spatial averaging with equal weighting. In this paper we present a different multiple point equalization method. We first determine representative prototypical room responses derived from several room responses that share similar characteristics, using the fuzzy unsupervised learning method. These prototypical responses can then be combined to form a general point response. When we use the inverse of the general point response as an equalizing filter, our results show a significant improvement in equalization performance over the spatial averaging methods. This simultaneous equalization is achieved by suppressing the peaks in the room magnitude spectrums. Applications of this method thus include equalization and multiple point sound control at home and in automobiles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
title = {Adaptive Narrative: How Autonomous Agents, {Hollywood}, and Multiprocessing Operating Systems Can Live Happily Ever After},
author = {Jay Douglas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{cohen_real-time_2001,
title = {Real-Time High-Dynamic Range Texture Mapping},
author = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Eurographics Rendering Workshop},
abstract = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8-bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
title = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
author = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
url = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of real-world scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
keywords = {Social Simulation, Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
title = {Modeling Emotions in the Mission Rehearsal Exercise},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
year = {2001},
date = {2001-05-01},
booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
pages = {457--466},
address = {Orlando, FL},
abstract = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_tears_2001,
title = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {278--285},
address = {Montreal, Canada},
abstract = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al. 's IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision- making skills in highly evocative situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Debevec, Paul
A Photometric Approach to Digitizing Cultural Artifacts Proceedings Article
In: Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage, Glyfada, Greece, 2001.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_photometric_2001,
title = {A Photometric Approach to Digitizing Cultural Artifacts},
author = {Tim Hawkins and Jonathan Cohen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Photometric%20Approach%20to%20Digitizing%20Cultural%20Artifacts.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
address = {Glyfada, Greece},
abstract = {In this paper we present a photometry-based approach to the digital documentation of cultural artifacts. Rather than representing an artifact as a geometric model with spatially varying reflectance properties, we instead propose directly representing the artifact in terms of its reflectance field - the manner in which it transforms light into images. The principal device employed in our technique is a computer-controlled lighting apparatus which quickly illuminates an artifact from an exhaustive set of incident illumination directions and a set of digital video cameras which record the artifact's appearance under these forms of illumination. From this database of recorded images, we compute linear combinations of the captured images to synthetically illuminate the object under arbitrary forms of complex incident illumination, correctly capturing the effects of specular reflection, subsurface scattering, self-shadowing, mutual illumination, and complex BRDF's often present in cultural artifacts. We also describe a computer application that allows users to realistically and interactively relight digitized artifacts.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, C. M.; Narayanan, Shrikanth; Pieraccini, R.
Recognition of Negative Emotions from the Speech Signal Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001), 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{lee_recognition_2001,
title = {Recognition of Negative Emotions from the Speech Signal},
author = {C. M. Lee and Shrikanth Narayanan and R. Pieraccini},
url = {http://ict.usc.edu/pubs/Recognition%20of%20Negative%20Emotions%20from%20the%20Speech%20Signal.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001)},
abstract = {This paper reports on methods for automatic classification of spoken utterances based on the emotional state of the speaker. The data set used for the analysis comes from a corpus of human-machine dialogs recorded from a commercial application deployed by SpeechWorks. Linear discriminant classification with Gaussian class-conditional probability distribution and k-nearest neighborhood methods are used to classify utterances into two basic emotion states, negative and non-negative. The features used by the classifiers are utterance-level statistics of the fundamental frequency and energy of the speech signal. To improve classification performance, two specific feature selection methods are used; namely, promising first selection and forward feature selection. Principal component analysis is used to reduce the dimensionality of the features while maximizing classification accuracy. Improvements obtained by feature selection and PCA are reported in this paper. We reported the results.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Tchou, Chris; Debevec, Paul
Light Stage 2.0 Proceedings Article
In: SIGGRAPH Technical Sketches, pp. 217, 2001.
Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_light_2001,
title = {Light Stage 2.0},
author = {Tim Hawkins and Jonathan Cohen and Chris Tchou and Paul Debevec},
url = {http://ict.usc.edu/pubs/Light%20Stage%202.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
pages = {217},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Ess-Dykema, Carol Van; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{olsen_implicit_2001,
title = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
author = {Mari Olsen and David Traum and Carol Van Ess-Dykema and Amy Weinberg},
url = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Machine Translation Summit VIII},
address = {Santiago de Compostela, Spain},
abstract = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense relization with the best accuracy rate of 92% on a corpus of Chinese articles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Embedded High-Quality Multichannel Audio Coding Proceedings Article
In: Conference on Media Processors, Symposium on Electronic Imaging, San Jose, CA, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{yang_embedded_2001,
title = {Embedded High-Quality Multichannel Audio Coding},
author = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C.-C. Jay Kuo},
url = {http://ict.usc.edu/pubs/Embedded%20High-Quality%20Multichannel%20Audio%20Coding.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Conference on Media Processors, Symposium on Electronic Imaging},
address = {San Jose, CA},
abstract = {An embedded high-quality multi-channel audio coding algorithm is proposed in this research. The Karhunen-Loeve Transform (KLT) is applied to multichannel audio signals in the pre-processing stage to remove inter-channel redundancy. Then, after processing of several audio coding blocks, transformed coefficients are layered quantized and the bit stream is ordered according to their importance. The multichannel audio bit stream generated by the proposed algorithm has a fully progressive property, which is highly desirable for audio multicast applications in heterogeneous networks. Experimental results show that, compared with the MPEG Advanced Audio Coding (AAC) algorithm, the proposed algorithm achieves a better performance with both the objective MNR (Mask-to-Noise-Ratio) measurement and the subjective listening test at several different bit rates.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Waese, Jamie; Debevec, Paul
A Real Time High Dynamic Range Light Probe Proceedings Article
In: SIGGRAPH Technical Sketches, 2001.
Links | BibTeX | Tags: Graphics
@inproceedings{waese_real_2001,
title = {A Real Time High Dynamic Range Light Probe},
author = {Jamie Waese and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Real%20Time%20High%20Dynamic%20Range%20Light%20Probe.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
A Cluster Centroid Method for Room Response Equalization at Multiple Locations Proceedings Article
In: IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics, pp. 55–58, New Platz, NY, 2001, ISBN: 0-7803-7126-7.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_cluster_2001,
title = {A Cluster Centroid Method for Room Response Equalization at Multiple Locations},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/A%20CLUSTER%20CENTROID%20METHOD%20FOR%20ROOM%20RESPONSE%20EQUALIZATION%20AT%20MULTIPLE%20LOCATIONS.pdf},
isbn = {0-7803-7126-7},
year = {2001},
date = {2001-01-01},
booktitle = {IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics},
pages = {55--58},
address = {New Platz, NY},
abstract = {In this paper we address the problem of simultaneous room response equalization for multiple listeners. Traditional approaches to this problem have used a single microphone at the listening position to measure impulse responses from a loudspeaker and then use an inverse filter to correct the frequency response. The problem with that approach is that it only works well for that one point and in most cases is not practical even for one listener with a typical ear spacing of 18 cm. It does not work at all for other listeners in the room, or if the listener changes positions even slightly. We propose a new approach that is based on the Fuzzy c-means clustering technique. We use this method to design equalization filters and demonstrate that we can achieve better equalization performance for several locations in the room simultaneously as compared to single point or simple averaging methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Srinivasamurthy, Naveen; Narayanan, Shrikanth; Ortega, Antonio
Use of Model Transformations for Distributed Speech Recognition Proceedings Article
In: 4th ISCA Tutorial and Research Workshop on Speech Synthesis, pp. 113–116, Sophia Antipolis, France, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{srinivasamurthy_use_2001,
title = {Use of Model Transformations for Distributed Speech Recognition},
author = {Naveen Srinivasamurthy and Shrikanth Narayanan and Antonio Ortega},
url = {http://ict.usc.edu/pubs/Use%20of%20Model%20Transformations%20for%20Distributed%20Speech%20Recognition.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {4th ISCA Tutorial and Research Workshop on Speech Synthesis},
pages = {113--116},
address = {Sophia Antipolis, France},
abstract = {Due to bandwidth limitations, the speech recognizer in distributed speech recognition (DSR) applications has to use encoded speech - either traditional speech encoding or speech encoding optimized for recognition. The penalty incurred in reducing the bitrate is degradation in speech recognition performance. The diversity of the applications using DSR implies that a variety of speech encoders can be used to compress speech. By treating the encoder variability as a mismatch we propose using model transformation to reduce the speech recognition performance degradation. The advantage of using model transformation is that only a single model set needs to be trained at the server, which can be adapted on the fly to the input speech data. We were able to reduce the word error rate by 61.9%, 63.3% and 56.3% for MELP, GSM and MFCC-encoded data, respectively, by using MAP adaptation, which shows the generality of our proposed scheme.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{damiano_anticipatory_2001,
title = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
author = {Rossana Damiano and David Traum},
url = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Proceedings Article
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2001,
title = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
address = {Edinburgh, Scotland},
abstract = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Adaptive Karhunen-Loeve Transform for Enhanced Multichannel Audio Coding Proceedings Article
In: SPIE, San Diego, CA, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{yang_adaptive_2001,
title = {Adaptive Karhunen-Loeve Transform for Enhanced Multichannel Audio Coding},
author = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C.-C. Jay Kuo},
url = {http://ict.usc.edu/pubs/Adaptive%20Karhunen-Loeve%20Transform%20for%20Enhanced%20Multichannel%20Audio%20Coding.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SPIE},
address = {San Diego, CA},
abstract = {A modified MPEG Advanced Audio Coding (AAC) scheme based on the Karhunen-Loeve transform (KLT) to remove inter-channel redundancy, which is called the MAACKL method, has been proposed in our previous work. However, a straightforward coding of elements of the KLT matrix generates about 240 bits per matrix for typical 5 channel audio contents. Such an overhead is too expensive so that it prevents MAACKL from updating KLT dynamically in a short period of time. In this research, we study the de-correlation efficiency of adaptive KLT as well as an efficient way to encode elements of the KLT matrix via vector quantization. The effect due to different quantization accuracy and adaptation period is examined carefully. It is demonstrated that with the smallest possible number of bits per matrix and a moderately long KLT adaptation time, the MAACKL algorithm can still generate a very good coding performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2000
Debevec, Paul; Hawkins, Tim; Tchou, Chris; Duiker, Haarm-Pieter; Sarokin, Westley
Acquiring the Reflectance Field of a Human Face Proceedings Article
In: SIGGRAPH, New Orleans, LA, 2000.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_acquiring_2000,
title = {Acquiring the Reflectance Field of a Human Face},
author = {Paul Debevec and Tim Hawkins and Chris Tchou and Haarm-Pieter Duiker and Westley Sarokin},
url = {http://ict.usc.edu/pubs/Acquiring%20the%20Re%EF%AC%82ectance%20Field%20of%20a%20Human%20Face.pdf},
year = {2000},
date = {2000-07-01},
booktitle = {SIGGRAPH},
address = {New Orleans, LA},
abstract = {We present a method to acquire the reflectance field of a human face and use these measurements to render the face under arbitrary changes in lighting and viewpoint. We first acquire images of the face from a small set of viewpoints under a dense sampling of incident illumination directions using a light stage. We then construct a reflectance function image for each observed image pixel from its values over the space of illumination directions. From the reflectance functions, we can directly generate images of the face from the original viewpoints in any form of sampled or computed illumination. To change the viewpoint, we use a model of skin reflectance to estimate the appearance of the reflectance functions for novel viewpoints. We demonstrate the technique with synthetic renderings of a person's face under novel illumination and viewpoints.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Selective Signal Cancellation for Multiple Listener Audio Applications: An Information Theory Approach Proceedings Article
In: IEEE International Conference on Multimedia and Expo, New York, NY, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_selective_2000,
title = {Selective Signal Cancellation for Multiple Listener Audio Applications: An Information Theory Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/SELECTIVE%20SIGNAL%20CANCELLATION%20FOR%20MULTIPLE-LISTENER%20AUDIO%20APPLICATIONS-%20AN%20INFORMATION%20THEORY%20APPROACH.pdf},
year = {2000},
date = {2000-07-01},
booktitle = {IEEE International Conference on Multimedia and Expo},
address = {New York, NY},
abstract = {Selectively canceling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, teleconferencing, office, industrial and other applications. The traditional noise cancellation approach is impractical for such applications because it requires sensors that must be placed on the listeners. In this paper we propose an alternative method to minimize signal power in a given location and maximize signal power in another location of interest. A key advantage of this approach would be the need to eliminate sensors. We investigate the use of an information theoretic criterion known as mutual information to design filter coefficients that selectively cancel a signal in one audio channel, and transmit it in another (complementary) channel. Our results show an improvement in power gain at one location in the room relative to the other.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Human-like behavior, alas, demands human-like intellect Proceedings Article
In: Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents, Barcelona, Spain, 2000.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_human-like_2000,
title = {Human-like behavior, alas, demands human-like intellect},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Human-like%20behavior%20alas%20demands%20human-like%20intellect.pdf},
year = {2000},
date = {2000-06-01},
booktitle = {Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents},
address = {Barcelona, Spain},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Gratch, Jonathan; Rosenbloom, Paul
Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Social Simulation, Virtual Humans
@inproceedings{hill_flexible_2000,
title = {Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces},
author = {Randall W. Hill and Jonathan Gratch and Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Flexible%20Group%20Behavior-%20Virtual%20Commanders%20for%20Synthetic%20Battlespaces.pdf},
year = {2000},
date = {2000-06-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
address = {Barcelona, Spain},
abstract = {This paper describes a project to develop autonomous commander agents for synthetic battlespaces. The commander agents plan missions, monitor their execution, and replan when necessary. To reason about the social aspects of group behavior, the commanders take various social stances that enable them to collaborate with friends, exercise or defer to authority, and thwart their foes. The purpose of this paper is to describe these capabilities and how they came to be through a series of lessons learned while developing autonomous agents for this domain.},
keywords = {CogArch, Cognitive Architecture, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Proceedings Article
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_how_2000,
title = {How Long Can an Agent Look Away From a Target?},
author = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
year = {2000},
date = {2000-05-01},
booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no one theory for SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot, who visually tracks the location and direction of several vehicles that he cannot see simultaneously, must shift its visual field of view to scan the environment and to sense the situation involved. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. In SA, it is important for a virtual pilot having a limited visual field of view to gather more information from its environment and to choose appropriate actions to take in the environment without losing the target.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time Proceedings Article
In: Proceedings of ICME 2000, New York, NY, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_multiple_2000,
title = {A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/A%20MULTIPLE%20INPUT%20SINGLE%20OUTPUT%20MODEL%20FOR%20RENDERING%20VIRTUAL%20SOUND%20SOURCES%20IN%20REAL%20TIME.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of ICME 2000},
address = {New York, NY},
abstract = {Accurate localization of sound in 3-D space is based on variations in the spectrum of sound sources. These variations arise mainly from reflection and diffraction effects caused by the pinnae and are described through a set of Head-Related Transfer Functions (HRTF’s) that are unique for each azimuth and elevation angle. A virtual sound source can be rendered in the desired location by filtering with the corresponding HRTF for each ear. Previous work on HRTF modeling has mainly focused on the methods that attempt to model each transfer function individually. These methods are generally computationally-complex and cannot be used for real-time spatial rendering of multiple moving sources. In this work we provide an alternative approach, which uses a multiple input single output state space system to create a combined model of the HRTF’s for all directions. This method exploits the similarities among the different HRTF’s to achieve a significant reduction in the model size with a minimum loss of accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Émile: Marshalling Passions in Training and Education Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_emile_2000,
title = {Émile: Marshalling Passions in Training and Education},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
pages = {325--332},
address = {Barcelona, Spain},
abstract = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes Émile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Modeling the Interplay Between Emotion and Decision-Making Proceedings Article
In: Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_modeling_2000,
title = {Modeling the Interplay Between Emotion and Decision-Making},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20Between%20Emotion%20and%20Decision-Making.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Current models of computer-generated forces are limited by their inability to model many of the moderators that influence the performance of real troops in the field such as the effects of stress, emotion, and individual differences. This article discusses an extension to our command and control modeling architecture that begins to address how behavioral moderators influence the command decision-making process. Our Soar-Cfor command architecture was developed under the STOW and ASTT programs to support distributed command and control decision-making in the domain of army aviation planning. We have recently extended this architecture to model how people appraise the emotional significance of events and how these events influence decision making.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scholer, Andrew; Rickel, Jeff; Angros, Richard Jr.; Johnson, W. Lewis
Learning Domain Knowledge for Teaching Procedural Tasks Proceedings Article
In: AAAI-2000 Fall Symposium on Learning How to Do Things, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{scholer_learning_2000,
title = {Learning Domain Knowledge for Teaching Procedural Tasks},
author = {Andrew Scholer and Jeff Rickel and Richard Jr. Angros and W. Lewis Johnson},
url = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Tasks.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {AAAI-2000 Fall Symposium on Learning How to Do Things},
abstract = {Providing domain knowledge needed by intelligent tutoring systems to teach a procedure to students is traditionally a difficult and time consuming task. This paper presents a system for making this process easier by allowing the automated tutor to acquire the knowledge it needs through a combination of programming by demonstration, autonomous experimentation, and direct instruction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Eigenfilters for Signal Cancellation Proceedings Article
In: International Symposium on Intelligent Signal Processing and Communication Systems (ISPACS), Hawaii, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_eigenfilters_2000,
title = {Eigenfilters for Signal Cancellation},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/EIGENFILTERS%20FOR%20SIGNAL%20CANCELLATION.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {International Symposium on Intelligent Signal Processing and Communication Systems (ISPACS)},
address = {Hawaii},
abstract = {Selectively canceling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. The traditional noise cancellation approach is impractical for such applications because it requires sensors that must be placed on the listeners. In this paper we investigate the theoretical properties of eigenfilters for signal cancellation proposed in [1]. We also investigate the sensitivity of the eigenfilter as a function of the room impulse response duration. Our results show that with the minimum phase model for the room impulse response, we obtain a better behaviour in the sensitivity of the filter to the duration of the room response.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Srinivasamurthy, Naveen; Ortega, Antonio; Narayanan, Shrikanth
Efficient Scalable Speech Compression for Scalable Speech Recognition Proceedings Article
In: Proceedings of the IEEE Conference on Multimedia and Expo, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{srinivasamurthy_efficient_2000,
  title     = {Efficient Scalable Speech Compression for Scalable Speech Recognition},
  author    = {Naveen Srinivasamurthy and Antonio Ortega and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Efficient%20Scalable%20Speech%20Compression%20for%20Scalable%20Speech%20Recognition.pdf},
  year      = {2000},
  date      = {2000-01-01},
  booktitle = {Proceedings of the IEEE Conference on Multimedia and Expo},
  abstract  = {We propose a scalable recognition system for reducing recognition complexity. Scalable recognition can be combined with scalable compression in a distributed speech recognition (DSR) application to reduce both the computational load and the bandwidth requirement at the server. A low complexity preprocessor is used to eliminate the unlikely classes so that the complex recognizer can use the reduced subset of classes to recognize the unknown utterance. It is shown that by using our system it is fairly straightforward to trade-off reductions in complexity for performance degradation. Results of preliminary experiments using the TI-46 word digit database show that the proposed scalable approach can provide a 40% speed up, while operating under 1.05 kbps, compared to the baseline recognition using uncompressed speech.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
1999
Georgiou, Panayiotis G.; Tsakalides, Panagiotis; Kyriakakis, Chris
Alpha-Stable Modeling of Noise and Robust Time-Delay Estimation in the Presence of Impulsive Noise Proceedings Article
In: IEEE Transactions on Multimedia, pp. 291–301, 1999.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_alpha-stable_1999,
title = {Alpha-Stable Modeling of Noise and Robust Time-Delay Estimation in the Presence of Impulsive Noise},
author = {Panayiotis G. Georgiou and Panagiotis Tsakalides and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Alpha-Stable%20Modeling%20of%20Noise%20and%20Robust%20Time-%20Delay%20Estimation%20in%20the%20Presence%20of%20Impulsive%20Noise.pdf},
year = {1999},
date = {1999-09-01},
booktitle = {IEEE Transactions on Multimedia},
volume = {1},
pages = {291--301},
abstract = {A new representation of audio noise signals is proposed, based on symmetric alpha-stable (S$\alpha$S) distributions in order to better model the outliers that exist in real signals. This representation addresses a shortcoming of the Gaussian model, namely, the fact that it is not well suited for describing signals with impulsive behavior. The stable and Gaussian methods are used to model measured noise signals. It is demonstrated that the stable distribution, which has heavier tails than the Gaussian distribution, gives a much better approximation to real-world audio signals. The significance of these results is shown by considering the time delay estimation (TDE) problem for source localization in teleimmersion applications. In order to achieve robust sound source localization, a novel time delay estimation approach is proposed. It is based on fractional lower order statistics (FLOS), which mitigate the effects of heavy-tailed noise. An improvement in TDE performance is demonstrated using FLOS that is up to a factor of four better than what can be achieved with second-order statistics.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Randall W.
Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_continuous_1999,
  title     = {Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces},
  author    = {Jonathan Gratch and Randall W. Hill},
  url       = {http://ict.usc.edu/pubs/Continuous%20Planning%20and%20Collaboration%20for%20Command%20and%20Control%20in%20Joint%20Synthetic%20Battlespaces.pdf},
  year      = {1999},
  date      = {1999-05-01},
  booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
  address   = {Orlando, FL},
  abstract  = {In this paper we describe our efforts to model command and control entities for Joint Synthetic Battlespaces. Command agents require a broader repertoire of capabilities than is typically modeled in simulation. They must develop mission plans involving multiple subordinate units, monitor execution, dynamically modify mission plans in response to situational contingencies, collaborate with other decision makers, and deal with a host of organizational issues. We describe our approach to command agent modeling that addresses a number of these issues through its continuous and collaborative approach to mission planning.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gratch, Jonathan; Marsella, Stacy C.; Hill, Randall W.; Stone III, LTC George
Deriving Priority Intelligence Requirements for Synthetic Command Entities Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_deriving_1999,
title = {Deriving Priority Intelligence Requirements for Synthetic Command Entities},
author = {Jonathan Gratch and Stacy C. Marsella and Randall W. Hill and Stone, III, LTC George},
url = {http://ict.usc.edu/pubs/Deriving%20Priority%20Intelligence%20Requirements%20for%20Synthetic%20Command%20Entities.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {Simulation-based training is using increasingly complex synthetic forces. As more complex multiechelon synthetic forces are employed in simulations, the need for a realistic model of their command and control behavior becomes more urgent. In this paper we discuss one key component of such a model, the autonomous generation and use of priority intelligence requirements within multi-echelon plans.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Why You Should Buy an Emotional Planner Proceedings Article
In: Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures, 1999.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_why_1999,
title = {Why You Should Buy an Emotional Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Why%20You%20Should%20Buy%20an%20Emotional%20Planner.pdf},
year = {1999},
date = {1999-01-01},
booktitle = {Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures},
abstract = {Computation models of emotion have begun to address the problem of how agents arrive at a given emotional state, and how that state might alter their reactions to the environment. Existing work has focused on reactive models of behavior and does not, as of yet, provide much insight on how emotion might relate to the construction and execution of complex plans. This article focuses on this later question. I present a model of how agents appraise the emotion significance of events that illustrates a complementary relationship between classical planning methods and models of emotion processing. By building on classical planning methods, the model clarifies prior accounts of emotional appraisal and extends these accounts to handle the generation and execution of complex multi-agent plans.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}