Publications
Search
Ustun, Volkan; Rosenbloom, Paul S.
Towards Adaptive, Interactive Virtual Humans in Sigma Proceedings Article
In: Intelligent Virtual Agents, pp. 98–108, Springer, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0.
@inproceedings{ustun_towards_2015,
  title     = {Towards Adaptive, Interactive Virtual Humans in Sigma},
  author    = {Volkan Ustun and Paul S. Rosenbloom},
  url       = {http://ict.usc.edu/pubs/Towards%20Adaptive,%20Interactive%20Virtual%20Humans%20in%20Sigma.pdf},
  doi       = {10.1007/978-3-319-21996-7_10},
  isbn      = {978-3-319-21995-0},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Intelligent Virtual Agents},
  volume    = {9238},
  pages     = {98--108},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {Sigma is a nascent cognitive architecture/system that combines concepts from graphical models with traditional symbolic architectures. Here an initial Sigma-based virtual human (VH) is introduced that combines probabilistic reasoning, rule-based decision-making, Theory of Mind, Simultaneous Localization and Mapping and reinforcement learning in a unified manner. This non-modular unification of diverse cognitive, robotic and VH capabilities provides an important first step towards fully adaptive and interactive VHs in Sigma.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; DeVault, David; Lucas, Gale M.; Marsella, Stacy
Negotiation as a Challenge Problem for Virtual Humans Proceedings Article
In: Brinkman, Willem-Paul; Broekens, Joost; Heylen, Dirk (Ed.): Intelligent Virtual Agents, pp. 201–215, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{gratch_negotiation_2015,
  title     = {Negotiation as a Challenge Problem for Virtual Humans},
  author    = {Jonathan Gratch and David DeVault and Gale M. Lucas and Stacy Marsella},
  editor    = {Willem-Paul Brinkman and Joost Broekens and Dirk Heylen},
  url       = {http://ict.usc.edu/pubs/Negotiation%20as%20a%20Challenge%20Problem%20for%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7_21},
  isbn      = {978-3-319-21995-0 978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Intelligent Virtual Agents},
  volume    = {9238},
  pages     = {201--215},
  publisher = {Springer International Publishing},
  address   = {Delft, Netherlands},
  abstract  = {We argue for the importance of negotiation as a challenge problem for virtual human research, and introduce a virtual conversational agent that allows people to practice a wide range of negotiation skills. We describe the multi-issue bargaining task, which has become a de facto standard for teaching and research on negotiation in both the social and computer sciences. This task is popular as it allows scientists or instructors to create a variety of distinct situations that arise in real-life negotiations, simply by manipulating a small number of mathematical parameters. We describe the development of a virtual human that will allow students to practice the interpersonal skills they need to recognize and navigate these situations. An evaluation of an early wizard-controlled version of the system demonstrates the promise of this technology for teaching negotiation and supporting scientific research on social intelligence.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Susan; Morency, Louis-Philippe; Pynadath, David; Traum, David
Exploring the Implications of Virtual Human Research for Human-Robot Teams Proceedings Article
In: Virtual, Augmented and Mixed Reality, pp. 186–196, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
@inproceedings{gratch_exploring_2015,
  title     = {Exploring the Implications of Virtual Human Research for Human-Robot Teams},
  author    = {Jonathan Gratch and Susan Hill and Louis-Philippe Morency and David Pynadath and David Traum},
  url       = {http://ict.usc.edu/pubs/Exploring%20the%20Implications%20of%20Virtual%20Human%20Research%20for%20Human-Robot%20Teams.pdf},
  doi       = {10.1007/978-3-319-21067-4_20},
  isbn      = {978-3-319-21066-7 978-3-319-21067-4},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Virtual, Augmented and Mixed Reality},
  volume    = {9179},
  pages     = {186--196},
  publisher = {Springer International Publishing},
  address   = {Los Angeles, CA},
  abstract  = {This article briefly explores potential synergies between the fields of virtual human and human-robot interaction research. We consider challenges in advancing the effectiveness of human-robot teams makes recommendations for enhancing this by facilitating synergies between robotics and virtual human research.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hoegen, Rens; Stratou, Giota; Lucas, Gale M.; Gratch, Jonathan
Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma Proceedings Article
In: Intelligent Virtual Agents, pp. 452–460, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{hoegen_comparing_2015,
  title     = {Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma},
  author    = {Rens Hoegen and Giota Stratou and Gale M. Lucas and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Comparing%20Behavior%20Towards%20Humans%20and%20Virtual%20Humans%20in%20a%20Social%20Dilemma.pdf},
  doi       = {10.1007/978-3-319-21996-7_48},
  isbn      = {978-3-319-21995-0 978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Intelligent Virtual Agents},
  volume    = {9238},
  pages     = {452--460},
  publisher = {Springer International Publishing},
  address   = {Delft, Netherlands},
  abstract  = {The difference of shown social behavior towards virtual humans and real humans has been subject to much research. Many of these studies compare virtual humans (VH) that are presented as either virtual agents controlled by a computer or as avatars controlled by real humans. In this study we directly compare VHs with real humans. Participants played an economic game against a computer-controlled VH or a visible human opponent. Decisions made throughout the game were logged, additionally participants’ faces were filmed during the study and analyzed with expression recognition software. The analysis of choices showed participants are far more willing to violate social norms with VHs: they are more willing to steal and less willing to forgive. Facial expressions show trends that suggest they are treating VHs less socially. The results highlight, that even in impoverished social interactions, VHs have a long way to go before they can evoke truly human-like responses.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Unnikrishnan, K. V.; Shankar, Santosh; Merchant, Chirag
Intelligent Agents for Virtual Simulation of Human-Robot Interaction Proceedings Article
In: Virtual, Augmented and Mixed Reality, pp. 228–239, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
@inproceedings{wang_intelligent_2015,
  title     = {Intelligent Agents for Virtual Simulation of Human-Robot Interaction},
  author    = {Ning Wang and David V. Pynadath and K. V. Unnikrishnan and Santosh Shankar and Chirag Merchant},
  url       = {http://ict.usc.edu/pubs/Intelligent%20Agents%20for%20Virtual%20Simulation%20of%20Human-Robot%20Interaction.pdf},
  doi       = {10.1007/978-3-319-21067-4_24},
  isbn      = {978-3-319-21066-7 978-3-319-21067-4},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Virtual, Augmented and Mixed Reality},
  volume    = {9179},
  pages     = {228--239},
  publisher = {Springer International Publishing},
  address   = {Los Angeles, CA},
  series    = {Lecture Notes in Computer Science},
  abstract  = {To study how robots can work better with humans as a team, we have designed an agent-based online testbed that supports virtual simulation of domain-independent human-robot interaction. The simulation is implemented as an online game where humans and virtual robots work together in simulated scenarios. This testbed allows researchers to carry out human-robot interaction studies and gain better understanding of, for example, how a robot’s communication can improve human-robot team performance by fostering better trust relationships among humans and their robot teammates. In this paper, we discuss the requirements, challenges and the design of such human-robot simulation. We illustrate its operation with an example human-robot joint reconnaissance task.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bruijnes, Merijn; Akker, Rieks; Hartholt, Arno; Heylen, Dirk
Virtual Suspect William Proceedings Article
In: Intelligent Virtual Agents, pp. 67–76, Springer, 2015.
@inproceedings{bruijnes_virtual_2015,
  title     = {Virtual Suspect William},
  author    = {Merijn Bruijnes and Rieks Akker and Arno Hartholt and Dirk Heylen},
  url       = {http://ict.usc.edu/pubs/Virtual%20Suspect%20William.pdf},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Intelligent Virtual Agents},
  pages     = {67--76},
  publisher = {Springer},
  abstract  = {We evaluate an algorithm which computes the responses of an agent that plays the role of a suspect in simulations of police interrogations. The algorithm is based on a cognitive model - the response model - that is centred around keeping track of interpersonal relations. The model is parametrized in such a way that different personalities of the virtual suspect can be defined. In the evaluation we defined three different personalities and had participants guess the personality based on the responses the model provided in an interaction with the participant. We investigate what factors contributed to the ability of a virtual agent to show behaviour that was recognized by participants as belonging to a persona.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Furbach, Ulrich; Gordon, Andrew S.; Schon, Claudia
Tackling Benchmark Problems of Commonsense Reasoning Proceedings Article
In: Proceedings of the Workshop on Bridging the Gap between Human and Automated Reasoning, pp. 47–59, Berlin, Germany, 2015.
@inproceedings{furbach_tackling_2015,
  title     = {Tackling Benchmark Problems of Commonsense Reasoning},
  author    = {Ulrich Furbach and Andrew S. Gordon and Claudia Schon},
  url       = {http://ict.usc.edu/pubs/Tackling%20Benchmark%20Problems%20of%20Commonsense%20Reasoning.pdf},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of the Workshop on Bridging the Gap between Human and Automated Reasoning},
  volume    = {1412},
  pages     = {47--59},
  address   = {Berlin, Germany},
  abstract  = {There is increasing interest in the field of automated commonsense reasoning to find real world benchmarks to challenge and to further develop reasoning systems. One interesting example is the Triangle Choice of Plausible Alternatives (Triangle-COPA), which is a set of problems presented in first-order logic. The setting of these problems stems from the famous Heider-Simmel film used in early experiments in social psychology. This paper illustrates with two logical approaches---abductive logic programming and deontic logic---how these problems can be solved. Furthermore, we propose an idea of how to use background knowledge to support the reasoning process.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham
Skin Stretch - Simulating Dynamic Skin Microgeometry Proceedings Article
In: ACM SIGGRAPH 2015 Computer Animation Festival, pp. 133, Los Angeles, CA, 2015.
@inproceedings{nagano_skin_2015-1,
  title     = {Skin Stretch - Simulating Dynamic Skin Microgeometry},
  author    = {Koki Nagano and Graham Fyffe},
  url       = {http://ict.usc.edu/pubs/Skin%20Stretch%20-%20Simulating%20Dynamic%20Skin%20Microgeometry.pdf},
  doi       = {10.1145/2766894},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
  volume    = {34},
  number    = {4},
  pages     = {133},
  address   = {Los Angeles, CA},
  abstract  = {This demonstration of the effects of skin microstructure deformation on high-resolution dynamic facial rendering features the state-of-the-art skin in microstructure simulation, facial scanning, and rendering. Facial animations made with the technique show more realistic and expressive skin under facial expression.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Graham, Paul; Fyffe, Graham; Tonwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
  title     = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
  author    = {Paul Graham and Graham Fyffe and Borom Tonwattanapong and Abhijeet Ghosh and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
  doi       = {10.1145/2775280.2792561},
  isbn      = {978-1-4503-3636-9},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
  pages     = {1--1},
  publisher = {ACM Press},
  abstract  = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; DeVault, David
Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection Proceedings Article
In: Proceedings of SEMDIAL 2015 goDIAL, pp. 113–121, Gothenburg, Sweden, 2015.
@inproceedings{manuvinakurike_reducing_2015,
  title     = {Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection},
  author    = {Ramesh Manuvinakurike and Maike Paetzel and David DeVault},
  url       = {http://ict.usc.edu/pubs/Reducing%20the%20Cost%20of%20Dialogue%20System%20Training%20and%20Evaluation%20with%20Online,%20Crowd-Sourced%20Dialogue%20Data%20Collection.pdf},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of SEMDIAL 2015 goDIAL},
  pages     = {113--121},
  address   = {Gothenburg, Sweden},
  abstract  = {This paper presents and analyzes an approach to crowd-sourced spoken dialogue data collection. Our approach enables low cost collection of browser-based spoken dialogue interactions between two remote human participants (human-human condition) as well as one remote human participant and an automated dialogue system (human-agent condition). We present a case study in which 200 remote participants were recruited to participate in a fast-paced image matching game, and which included both human-human and human-agent conditions. We discuss several technical challenges encountered in achieving this crowd-sourced data collection, and analyze the costs in time and money of carrying out the study. Our results suggest the potential of crowdsourced spoken dialogue data to lower costs and facilitate a range of research in dialogue modeling, dialogue system design, and system evaluation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
When the going gets tough: Grit predicts costly perseverance Journal Article
In: Journal of Research in Personality, vol. 59, pp. 15–22, 2015, ISSN: 00926566.
@article{lucas_when_2015,
  title     = {When the going gets tough: Grit predicts costly perseverance},
  author    = {Gale M. Lucas and Jonathan Gratch and Lin Cheng and Stacy Marsella},
  url       = {http://ict.usc.edu/pubs/When%20the%20going%20gets%20tough-Grit%20predicts%20costly%20perseverance.pdf},
  doi       = {10.1016/j.jrp.2015.08.004},
  issn      = {0092-6566},
  year      = {2015},
  date      = {2015-08-01},
  journal   = {Journal of Research in Personality},
  volume    = {59},
  pages     = {15--22},
  abstract  = {In this research, we investigate how grittier individuals might incur some costs by persisting when they could move on. Grittier participants were found to be less willing to give up when failing even though they were likely to incur a cost for their persistence. First, grittier participants are more willing to risk failing to complete a task by persisting on individual items. Second, when they are losing, they expend more effort and persist longer in a game rather than quit. Gritty participants have more positive emotions and expectations toward the task, which mediates the relationship between grit and staying to persist when they are losing. Results show gritty individuals are more willing to risk suffering monetary loss to persist.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew; Leuski, Anton; Casas, Dan; Shapiro, Ari
Smart Mobile Virtual Humans: “Chat with Me!” Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 475–478, Springer, Delft, Netherlands, 2015.
@inproceedings{kang_smart_2015,
  title     = {Smart Mobile Virtual Humans: ``Chat with Me!''},
  author    = {Sin-Hwa Kang and Andrew Feng and Anton Leuski and Dan Casas and Ari Shapiro},
  url       = {http://ict.usc.edu/pubs/Smart%20Mobile%20Virtual%20Humans%20-%20Chat%20with%20Me.pdf},
  doi       = {10.1007/978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {475--478},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {In this study, we are interested in exploring whether people would talk with 3D animated virtual humans using a smartphone for a longer amount of time as a sign of feeling rapport [5], compared to non-animated or audio-only characters in everyday life. Based on previous studies [2, 7, 10], users prefer animated characters in emotionally engaged interactions when the characters were displayed on mobile devices, yet in a lab setting. We aimed to reach a broad range of users outside of the lab in natural settings to investigate the potential of our virtual human on smartphones to facilitate casual, yet emotionally engaging conversation. We also found that the literature has not reached a consensus regarding the ideal gaze patterns for a virtual human, one thing researchers agree on is that inappropriate gaze could negatively impact conversations at times, even worse than receiving no visual feedback at all [1, 4]. Everyday life may bring the experience of awkwardness or uncomfortable sentiments in reaction to continuous mutual gaze. On the other hand, gaze aversion could also make a speaker think their partner is not listening. Our work further aims to address this question of what constitutes appropriate eye gaze in emotionally engaged interactions. We developed a 3D animated and chat-based virtual human which presented emotionally expressive nonverbal behaviors such as facial expressions, head gestures, gaze, and other upper body movements (see Figure 1). The virtual human displayed appropriate gaze that was either consisted of constant mutual gaze or gaze aversion based on a statistical model of saccadic eye movement [8] while listening. Both gaze patterns were accompanied by other forms of appropriate nonverbal feedback. To explore the question of optimal communicative medium, we distributed our virtual human application to users via an app store for Android-powered phones (i.e. Google Play Store) in order to target users who owned a smartphone and could use our application in various natural settings.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
@inproceedings{feng_platform_2015,
  title     = {A Platform for Building Mobile Virtual Humans},
  author    = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
  url       = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {310--319},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
  title     = {An Automultiscopic Projector Array for Interactive Digital Humans},
  author    = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
  doi       = {10.1145/2782782.2792494},
  isbn      = {978-1-4503-3635-2},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {SIGGRAPH 2015},
  pages     = {1--1},
  publisher = {ACM Press},
  address   = {Los Angeles, CA},
  abstract  = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high-angular density over a wide-field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore ”time-offset” interactions with recorded 3D human subjects.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Gratch, Jonathan; Ustun, Volkan
Towards Emotion in Sigma: From Appraisal to Attention Proceedings Article
In: Proceedings of AGI 2015, pp. 142–151, Springer International Publishing, Berlin, Germany, 2015.
@inproceedings{rosenbloom_towards_2015,
  title     = {Towards Emotion in Sigma: From Appraisal to Attention},
  author    = {Paul S. Rosenbloom and Jonathan Gratch and Volkan Ustun},
  url       = {http://ict.usc.edu/pubs/Towards%20Emotion%20in%20Sigma%20-%20From%20Appraisal%20to%20Attention.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of AGI 2015},
  volume    = {9205},
  pages     = {142--151},
  publisher = {Springer International Publishing},
  address   = {Berlin, Germany},
  abstract  = {A first step is taken towards incorporating emotional processing into Sigma, a cognitive architecture that is grounded in graphical models, with the addition of appraisal variables for expectedness and desirability plus their initial implications for attention at two levels of the control hierarchy. The results leverage many of Sigma's existing capabilities but with a few key additions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Demski, Abram
Expression Graphs Unifying Factor Graphs and Sum-Product Networks Proceedings Article
In: Artificial General Intelligence, pp. 241–250, Springer, Berlin, Germany, 2015.
@inproceedings{demski_expression_2015,
  title     = {Expression Graphs Unifying Factor Graphs and Sum-Product Networks},
  author    = {Abram Demski},
  url       = {http://ict.usc.edu/pubs/Expression%20Graphs%20Unifying%20Factor%20Graphs%20and%20Sum-Product%20Networks.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Artificial General Intelligence},
  pages     = {241--250},
  publisher = {Springer},
  address   = {Berlin, Germany},
  abstract  = {Factor graphs are a very general knowledge representation, subsuming many existing formalisms in AI. Sum-product networks are a more recent representation, inspired by studying cases where factor graphs are tractable. Factor graphs emphasize expressive power, while sum-product networks restrict expressiveness to get strong guarantees on speed of inference. A sum-product network is not simply a restricted factor graph, however. Although the inference algorithms for the two structures are very similar, translating a sum-product network into factor graph representation can result in an exponential slowdown. We propose a formalism which generalizes factor graphs and sum-product networks, such that inference is fast in cases whose structure is close to a sum-product network.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbiç, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 07300301.
@article{nagano_skin_2015,
  title     = {Skin Microstructure Deformation with Displacement Map Convolution},
  author    = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbič and Hao Li and Abhijeet Ghosh and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
  doi       = {10.1145/2766894},
  issn      = {0730-0301},
  year      = {2015},
  date      = {2015-07-01},
  journal   = {ACM Transactions on Graphics},
  volume    = {34},
  number    = {4},
  pages     = {1--10},
  abstract  = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Nouri, Elnaz; Traum, David
Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game Proceedings Article
In: Proceedings of AHFE 2015, Las Vegas, NV, 2015.
@inproceedings{nouri_cross_2015,
  title     = {Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game},
  author    = {Elnaz Nouri and David Traum},
  url       = {http://ict.usc.edu/pubs/Cross%20cultural%20report%20of%20values%20and%20decisions%20in%20the%20multi%20round%20ultimatum%20game%20and%20the%20centipede%20game.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of AHFE 2015},
  address   = {Las Vegas, NV},
  abstract  = {This paper investigates the cultural differences in decision making behavior of people from the US and India. We study players from these cultures playing the Multi Round Ultimatum Game and the Centipede Game online. In order to study how people from different cultures evaluate decisions we use criteria from the Multi Attribute Relational Values (MARV) survey. Our results confirm the existence of cultural differences in how people from US and India make decisions in the Ultimatum and Centipede games. We also observe differences in responses to survey questions implying differences in the amount of importance that the two cultures assign to the MARV decision making criteria.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jon; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli
To tweet or not to tweet: The question of emotion and excitement about sporting events Proceedings Article
In: Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion, Geneva, Switzerland, 2015.
@inproceedings{gratch_tweet_2015,
  title     = {To tweet or not to tweet: The question of emotion and excitement about sporting events},
  author    = {Jon Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler},
  url       = {http://ict.usc.edu/pubs/To%20tweet%20or%20not%20to%20tweet%20-The%20question%20of%20emotion%20and%20excitement%20about%20sporting%20events.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion},
  address   = {Geneva, Switzerland},
  abstract  = {Sporting events can serve as laboratories to explore emotion and computational tools provide new ways to examine emotional processes “in the wild”. Moreover, emotional processes are assumed -but untested- in sports economics. For example, according to the well-studied uncertainty of outcome hypothesis (UOH), “close” games are more exciting and therefore better attended. If one team were certain to win, it would take away a major source of excitement, reducing positive affect, and therefore decreasing attendance. The role of emotion here is assumed but has not been tested; furthermore, the measures used (ticket sales, attendance, TV-viewership) do not allow for such a test because they are devoid of emotional content. To address this problem, we use tweets per minute (specifically, tweets posted during 2014 World Cup with official game hashtags). Sentiment analysis of these tweets can give interesting insights into what emotional processes are involved. Another benefit of tweets is that they are dynamic, and novel results from dynamic analyses (of TV-viewership) suggest that the UOH effect can actually reverse as games unfold (people switch channels away from close games). We therefore also reconsider the UOH, specifically, extending it by both examining sentiment and dynamic changes during the game. To consider such changes, we focus on games that could have been close (high in uncertainty), but ended up being lower in uncertainty. We operationalize such unexpected certainty of outcome as the extent to which games are predicted to be “close” (based on betting odds), but ended up with a bigger difference between the teams’ scores than was expected. Statistical analyses revealed that, contrary to the UOH, games with a bigger difference in score between teams than expected had higher tweets per minute. We also performed sentiment analysis, categorizing each tweet as positive, negative or neutral, and found that games with higher tweets per minute also have a higher percentage of negative tweets. Furthermore, games that have a bigger difference than expected have a higher percentage of negative tweets (compared to games closer to what is expected). This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion (and not positive emotion). The results are discussed in terms of innovations in methodology and understanding the role of emotion for “tuning in” to real world events. Further research could explore the specific mechanisms that link negative sentiment to excitement, such as worry or out-group derogation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cummins, Nicholas; Scherer, Stefan; Krajewski, Jarek; Schnieder, Sebastian; Epps, Julien; Quatieri, Thomas F.
A Review of Depression and Suicide Risk Assessment Using Speech Analysis Journal Article
In: Speech Communication, vol. 71, pp. 10–49, 2015, ISSN: 0167-6393.
@article{cummins_review_2015,
  title     = {A Review of Depression and Suicide Risk Assessment Using Speech Analysis},
  author    = {Nicholas Cummins and Stefan Scherer and Jarek Krajewski and Sebastian Schnieder and Julien Epps and Thomas F. Quatieri},
  url       = {http://www.sciencedirect.com/science/article/pii/S0167639315000369},
  doi       = {10.1016/j.specom.2015.03.004},
  issn      = {0167-6393},
  year      = {2015},
  date      = {2015-07-01},
  journal   = {Speech Communication},
  volume    = {71},
  pages     = {10--49},
  abstract  = {This paper is the first review into the automatic analysis of speech for use as an objective predictor of depression and suicidality. Both conditions are major public health concerns; depression has long been recognised as a prominent cause of disability and burden worldwide, whilst suicide is a misunderstood and complex course of death that strongly impacts the quality of life and mental health of the families and communities left behind. Despite this prevalence the diagnosis of depression and assessment of suicide risk, due to their complex clinical characterisations, are difficult tasks, nominally achieved by the categorical assessment of a set of specific symptoms. However many of the key symptoms of either condition, such as altered mood and motivation, are not physical in nature; therefore assigning a categorical score to them introduces a range of subjective biases to the diagnostic procedure. Due to these difficulties, research into finding a set of biological, physiological and behavioural markers to aid clinical assessment is gaining in popularity. This review starts by building the case for speech to be considered a key objective marker for both conditions; reviewing current diagnostic and assessment methods for depression and suicidality including key non-speech biological, physiological and behavioural markers and highlighting the expected cognitive and physiological changes associated with both conditions which affect speech production. We then review the key characteristics; size, associated clinical scores and collection paradigm, of active depressed and suicidal speech databases. The main focus of this paper is on how common paralinguistic speech characteristics are affected by depression and suicidality and the application of this information in classification and prediction systems. 
The paper concludes with an in-depth discussion on the key challenges – improving the generalisability through greater research collaboration and increased standardisation of data collection, and the mitigating unwanted sources of variability – that will shape the future research directions of this rapidly growing field of speech processing research.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Filter
2002
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_embodied_2002,
  title     = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
  author    = {David Traum and Jeff Rickel},
  url       = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Bologna, Italy},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kovar, Lucas; Gleicher, Michael; Pighin, Frédéric
Motion Graphs Proceedings Article
In: Proceedings of SIGGRAPH '02, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{kovar_motion_2002,
  title     = {Motion Graphs},
  author    = {Lucas Kovar and Michael Gleicher and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Motion%20Graphs.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {Proceedings of SIGGRAPH '02},
  address   = {San Antonio, TX},
  abstract  = {In this paper we present a novel method for creating realistic, controllable motion. Given a corpus of motion capture data, we automatically construct a directed graph called a motion graph that encapsulates connections among the database. The motion graph consists both of pieces of original motion and automatically generated transitions. Motion can be generated simply by building walks on the graph. We present a general framework for extracting particular graph walks that meet a user's specifications. We then show how this framework can be applied to the specific problem of generating different styles of locomotion along arbitrary paths.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Waese, Jamie; Hawkins, Tim
A Lighting Reproduction Approach to Live-Action Compositing Proceedings Article
In: SIGGRAPH 2002, pp. 547–556, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_lighting_2002,
  title     = {A Lighting Reproduction Approach to Live-Action Compositing},
  author    = {Paul Debevec and Andreas Wenger and Chris Tchou and Andrew Gardner and Jamie Waese and Tim Hawkins},
  url       = {http://ict.usc.edu/pubs/A%20Lighting%20Reproduction%20Approach%20to%20Live-Action%20Compositing.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {SIGGRAPH 2002},
  pages     = {547--556},
  address   = {San Antonio, TX},
  abstract  = {We describe a process for compositing a live performance of an actor into a virtual set wherein the actor is consistently illuminated by the virtual environment. The Light Stage used in this work is a two-meter sphere of inward-pointing RGB light emitting diodes focused on the actor, where each light can be set to an arbitrary color and intensity to replicate a real-world or virtual lighting environment. We implement a digital two-camera infrared matting system to composite the actor into the background plate of the environment without affecting the visible-spectrum illumination on the actor. The color response of the system is calibrated to produce correct color renditions of the actor as illuminated by the environment. We demonstrate moving-camera composites of actors into real-world environments and virtual sets such that the actor is properly illuminated by the environment into which they are composited.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Proceedings Article
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2002,
  title     = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
  year      = {2002},
  date      = {2002-06-01},
  booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
  address   = {Orlando, FL},
  abstract  = {Recognizing and managing emotion in oneself and in those under ones command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Lent, Michael
Virtual Humans as Participants vs. Virtual Humans as Actors Proceedings Article
In: AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_virtual_2002,
  title     = {Virtual Humans as Participants vs. Virtual Humans as Actors},
  author    = {Andrew S. Gordon and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20as%20Participants%20vs%20Virtual%20Humans%20as%20Actors.PDF},
  year      = {2002},
  date      = {2002-03-01},
  booktitle = {AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
  address   = {Stanford University},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Enabling and recognizing strategic play in strategy games: Lessons from Sun Tzu Proceedings Article
In: The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_enabling_2002,
  title     = {Enabling and recognizing strategic play in strategy games: Lessons from {Sun Tzu}},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Enabling%20and%20recognizing%20strategic%20play%20in%20strategy%20games-%20Lessons%20from%20Sun%20Tzu.PDF},
  year      = {2002},
  date      = {2002-03-01},
  booktitle = {The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
  address   = {Stanford University},
  abstract  = {The interactive entertainment genre of the strategy game entertains users by allowing them to engage in strategic play, which should encourage game designers to devote development efforts toward facilitating users that wish to employ commonsense strategies, and to recognize and react to specific user strategies during game play. This paper attempts to facilitate these development efforts by identifying and analyzing 43 strategies from Sun Tzu's The Art of War, which are broadly applicable across games in the strategy game genre. For each strategy, a set of specific actions are identified that should be provided to users to enable their execution, along with generalized recognition rules that can facilitate the design of entertaining responses to users' strategic behavior. Consideration of how the enabling actions could be incorporated into an existing strategy game is provided.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Fleischman, Michael; Hovy, Eduard
Emotional Variation in Speech-Based Natural Language Generation Proceedings Article
In: International Natural Language Generation Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{fleischman_emotional_2002,
  title     = {Emotional Variation in Speech-Based Natural Language Generation},
  author    = {Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/Emotional%20Variation%20in%20Speech-Based%20Natural%20Language%20Generation.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Natural Language Generation Conference},
  abstract  = {We present a framework for handling emotional variations in a speech-based natural language system for use in the MRE virtual training environment. The system is a first step toward addressing issues in emotion-based modeling of verbal communicative behavior. We cast the problem of emotional generation as a distance minimization task, in which the system chooses between multiple valid realizations for a given input based on the emotional distance of each realization from the speaker's attitude toward that input.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
A Tutorial on Image-Based Lighting Journal Article
In: IEEE Computer Graphics and Applications, 2002.
Links | BibTeX | Tags: Graphics
@article{debevec_tutorial_2002,
  title     = {A Tutorial on Image-Based Lighting},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Lighting.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {article}
}
Hill, Randall W.; Han, Changhee; Lent, Michael
Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments Proceedings Article
In: Proceedings of 14th Innovative Applications of Artificial Intelligence Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_applying_2002,
  title     = {Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments},
  author    = {Randall W. Hill and Changhee Han and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Applying%20Perceptually%20Driven%20Cognitive%20Mapping%20To%20Virtual%20Urban%20Environments.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Proceedings of 14th Innovative Applications of Artificial Intelligence Conference},
  abstract  = {This paper describes a method for building a cognitive map of a virtual urban environment. Our routines enable virtual humans to map their environment using a realistic model of perception. We based our implementation on a computational framework proposed by Yeap and Jefferies (Yeap & Jefferies 1999) for representing a local environment as a structure called an Absolute Space Representation (ASR). Their algorithms compute and update ASRs from a 2-1/2D sketch of the local environment, and then connect the ASRs together to form a raw cognitive map. Our work extends the framework developed by Yeap and Jefferies in three important ways. First, we implemented the framework in a virtual training environment, the Mission Rehearsal Exercise (Swartout et al. 2001). Second, we describe a method for acquiring a 2-1/2D sketch in a virtual world, a step omitted from their framework, but which is essential for computing an ASR. Third, we extend the ASR algorithm to map regions that are partially visible through exits of the local space. Together, the implementation of the ASR algorithm along with our extensions will be useful in a wide variety of applications involving virtual humans and agents who need to perceive and reason about spatial concepts in urban environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_ideas_2002,
  title     = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
  author    = {David Traum},
  url       = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_step_2002,
  title     = {A step toward irrationality: using emotion to change belief},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {1},
  pages     = {334--341},
  address   = {Bologna, Italy},
  abstract  = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pighin, Frédéric; Szeliski, Richard; Salesin, David H.
Modeling and Animating Realistic Faces from Images Journal Article
In: International Journal on Computer Vision, vol. 50, pp. 143–169, 2002.
Abstract | Links | BibTeX | Tags:
@article{pighin_modeling_2002,
  title     = {Modeling and Animating Realistic Faces from Images},
  author    = {Frédéric Pighin and Richard Szeliski and David H. Salesin},
  url       = {http://ict.usc.edu/pubs/Modeling%20and%20Animating%20Realistic%20Faces%20from%20Images.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {International Journal on Computer Vision},
  volume    = {50},
  pages     = {143--169},
  abstract  = {We present a new set of techniques for modeling and animating realistic faces from photographs and videos. Given a set of face photographs taken simultaneously, our modeling technique allows the interactive recovery of a textured 3D face model. By repeating this process for several facial expressions, we acquire a set of face models that can be linearly combined to express a wide range of expressions. Given a video sequence, this linear face model can be used to estimate the face position, orientation, and facial expression at each frame. We illustrate these techniques on several datasets and demonstrate robust estimations of detailed face geometry and motion.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan
Details of the CFOR Planner Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2002, 2002.
Links | BibTeX | Tags: Virtual Humans
@techreport{gratch_details_2002,
  title       = {Details of the CFOR Planner},
  author      = {Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/Details%20of%20the%20CFOR%20Planner.pdf},
  year        = {2002},
  date        = {2002-01-01},
  number      = {ICT TR 01.2002},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hill_anticipating_2002,
  title     = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
  author    = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {2},
  pages     = {821--827},
  address   = {Bologna, Italy},
  abstract  = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{rickel_toward_2002,
  title     = {Toward a New Generation of Virtual Humans for Interactive Experiences},
  author    = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Intelligent Systems},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Rickel, Jeff; Johnson, W. Lewis
Extending Virtual Humans to Support Team Training in Virtual Reality Book Section
In: Lakemeyer, G.; Nebel, B. (Ed.): Exploring Artificial Intelligence in the New Millennium, Morgan Kaufmann Publishers, San Francisco, CA, 2002.
Abstract | Links | BibTeX | Tags:
@incollection{rickel_extending_2002,
  title     = {Extending Virtual Humans to Support Team Training in Virtual Reality},
  author    = {Jeff Rickel and W. Lewis Johnson},
  editor    = {G. Lakemeyer and B. Nebel},
  url       = {http://ict.usc.edu/pubs/Extending%20Virtual%20Humans%20to%20Support%20Team%20Training%20in%20Virtual%20Reality.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Exploring Artificial Intelligence in the New Millennium},
  publisher = {Morgan Kaufmann Publishers},
  address   = {San Francisco, CA},
  abstract  = {This paper describes the use of virtual humans and distributed virtual reality to support team training, where students must learn their individual role in the team as well as how to coordinate their actions with their teammates. Students, instructors, and virtual humans cohabit a three-dimensional, interactive, simulated mock-up of their work environment, where they can practice together in realistic situations. The virtual humans can serve as instructors for individual students, and they can substitute for missing team members, allowing students to practice team tasks when some or all human instructors and teammates are unavailable. The paper describes our learning environment, the issues that arise in developing virtual humans for team training, and our design for the virtual humans, which is an extension of our Steve agent previously used for one-on-one tutoring.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Morie, Jacquelyn; Iyer, Kumar; Valanejad, R.; Sadek, Ramy; Miraglia, D.; Milam, D.
Emotionally Evocative Environments for Training Proceedings Article
In: Proceedings of the 23rd Army Science Conference, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_emotionally_2002,
  title     = {Emotionally Evocative Environments for Training},
  author    = {Jacquelyn Morie and Kumar Iyer and R. Valanejad and Ramy Sadek and D. Miraglia and D. Milam},
  url       = {http://ict.usc.edu/pubs/EMOTIONALLY%20EVOCATIVE%20ENVIRONMENTS%20FOR%20TRAINING.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Proceedings of the 23rd Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {This paper describes a project currently in progress at the University of Southern California's Institute for Creative Technologies (ICT). Much of the research at ICT involves developing better graphics, sound and artificial intelligence to be used in creating the next generation of training tools for the United States Army. Our project focuses on the use of emotional responses as an enhancement for training. Research indicates that an emotional connection is a strong factor in how and what we remember. In addition, real world situations often evoke surprising and significant emotional reactions that soldiers must deal with. Few current immersive training scenarios, however, focus on the emotional state of the trainee, limiting training scenarios to basic objective elements. The Sensory Environments Evaluation (SEE) Project at ICT is investigating the potential of emotionally compelling environments for more effective training. We do this by skillfully combining the sensory inputs available in virtual environments. Our current efforts concentrate on sight and sound; smell will be included as scent delivery methods improve. Evaluation studies are planned to determine the effectiveness of the techniques we are developing.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{gratch_creating_2002,
  title     = {Creating Interactive Virtual Humans: Some Assembly Required},
  author    = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
  url       = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Intelligent Systems},
  pages     = {54--63},
  abstract  = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
2001
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation Proceedings Article
In: IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, New Paltz, New York, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2001,
  title     = {Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/ROBUSTNESS%20OF%20THE%20EIGENFILTER%20FOR%20VARIATIONS%20IN%20LISTENER%20RESPONSES%20FOR%20SELECTIVE%20SIGNAL%20CANCELLATION.pdf},
  year      = {2001},
  date      = {2001-10-01},
  booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics},
  address   = {New Paltz, New York},
  abstract  = {Selectively cancelling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. We have proposed the eigenfilter for selectively cancelling signals in one direction, while attempting to retain them at unintentional directions. In this paper we investigate the behaviour of the performance measure (i.e., the gain) for a vowel and an unvoiced fricative, when the listener moves his head, in an automobile type environment. We show that in such a situation, a large energy in the difference between the impulse responses at a listener's location may affect the gain substantially. listeners in which only a subset wish to listen to the audio signal.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, Hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_adaptive_2001,
  title     = {Adaptive narrative: How autonomous agents, {Hollywood}, and multiprocessing operating systems can live happily ever after},
  author    = {Jonathan Gratch and Jay Douglas},
  url       = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
  doi       = {10.1007/3-540-45420-9_12},
  isbn      = {3-540-42611-6},
  year      = {2001},
  date      = {2001-10-01},
  booktitle = {Proceedings of International Conference on Virtual Storytelling},
  pages     = {100--112},
  address   = {Avignon, France},
  series    = {LNCS},
  abstract  = {Interacting Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN provide a strong basis for narrative coherence in the context of interactivity, HSP offer additional flexibility and the generation of stories and the mechanisms for generating comic situations.},
  internal-note = {NOTE(review): the abstract text (HTN/HSP planning comparison) does not obviously match this paper's adaptive-narrative topic -- verify against the linked PDF},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rickel, Jeff
Intelligent Virtual Agents for Education and Training: Opportunities and Challenges Proceedings Article
In: Intelligent Virtual Agents: The 3rd International Workshop, Madrid, Spain, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{rickel_intelligent_2001,
  title     = {Intelligent Virtual Agents for Education and Training: Opportunities and Challenges},
  author    = {Jeff Rickel},
  url       = {http://ict.usc.edu/pubs/Intelligent%20Virtual%20Agents%20for%20Education%20and%20Training-%20Opportunities%20and%20Challenges.pdf},
  year      = {2001},
  date      = {2001-09-01},
  booktitle = {Intelligent Virtual Agents: The 3rd International Workshop},
  address   = {Madrid, Spain},
  abstract  = {Interactive virtual worlds provide a powerful medium for experiential learning. Intelligent virtual agents can cohabit virtual worlds with people and facilitate such learning as guides, mentors, and teammates. This paper reviews the main pedagogical advantages of animated agents in virtual worlds, discusses two key research challenges, and outlines an ambitious new project addressing those challenges.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
New Factors in Room Equalization Using a Fuzzy Logic Approach Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, New York, NY, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_new_2001,
  title     = {New Factors in Room Equalization Using a Fuzzy Logic Approach},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/New%20Factors%20in%20Room%20Equalization%20Using%20a%20Fuzzy%20Logic%20Approach.pdf},
  year      = {2001},
  date      = {2001-09-01},
  booktitle = {Proceedings of the Audio Engineering Society Convention},
  address   = {New York, NY},
  abstract  = {Room acoustical modes, particularly in small rooms, cause a significant variation in the room responses measured at different locations. Responses measured only a few cm apart can vary by up to 15-20 dB at certain frequencies. This makes it difficult to equalize an audio system for multiple simultaneous listeners. Previous methods have utilized multiple microphones and spatial averaging with equal weighting. In this paper we present a different multiple point equalization method. We first determine representative prototypical room responses derived from several room responses that share similar characteristics, using the fuzzy unsupervised learning method. These prototypical responses can then be combined to form a general point response. When we use the inverse of the general point response as an equalizing filter, our results show a significant improvement in equalization performance over the spatial averaging methods. This simultaneous equalization is achieved by suppressing the peaks in the room magnitude spectrums. Applications of this method thus include equalization and multiple point sound control at home and in automobiles.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
  title     = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
  author    = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
  url       = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
  address   = {Montreal, Canada},
  abstract  = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
  title     = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
  author    = {Jay Douglas and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  abstract  = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{cohen_real-time_2001,
  title     = {Real-Time High-Dynamic Range Texture Mapping},
  author    = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Eurographics Rendering Workshop},
  abstract  = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8- bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
  title     = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
  author    = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
  url       = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  abstract  = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of real-world scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
  keywords  = {Social Simulation, Virtual Humans, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
  title     = {Modeling Emotions in the Mission Rehearsal Exercise},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
  year      = {2001},
  date      = {2001-05-01},
  booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
  pages     = {457--466},
  address   = {Orlando, FL},
  abstract  = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ligorio_interlocution_2001,
  title     = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
  author    = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
  url       = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
  year      = {2001},
  date      = {2001-03-01},
  booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
  abstract  = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Browsing Image Collections with Representations of Commonsense Activities Journal Article
In: Journal of the American Society for Information Science and Technology, vol. 52, no. 11, pp. 925–929, 2001.
Abstract | Links | BibTeX | Tags: The Narrative Group
@article{gordon_browsing_2001,
  title     = {Browsing Image Collections with Representations of Commonsense Activities},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Browsing%20Image%20Collections%20with%20Representations%20of%20Commonsense%20Activities.PDF},
  year      = {2001},
  date      = {2001-01-01},
  journal   = {Journal of the American Society for Information Science and Technology},
  volume    = {52},
  number    = {11},
  pages     = {925--929},
  abstract  = {To support browsing-based subject access to image collections, it is necessary to provide users with networks of subject terms that are organized in an intuitive, richly interconnected manner. A principled approach to this task is to organize the subject terms by their relationship to activity contexts that are commonly understood among users. This article describes a methodology for creating networks of subject terms by manually representing a large number of common-sense activities that are broadly related to image subject terms. The application of this methodology to the Library of Congress Thesaurus for Graphic Materials produced 768 representations that supported users of a prototype browsing-based retrieval system in searching large, indexed photograph collections.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_tears_2001,
  title     = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  pages     = {278--285},
  address   = {Montreal, Canada},
  abstract  = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al.'s IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision-making skills in highly evocative situations.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Debevec, Paul
A Photometric Approach to Digitizing Cultural Artifacts Proceedings Article
In: Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage, Glyfada, Greece, 2001.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_photometric_2001,
  title     = {A Photometric Approach to Digitizing Cultural Artifacts},
  author    = {Tim Hawkins and Jonathan Cohen and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20Photometric%20Approach%20to%20Digitizing%20Cultural%20Artifacts.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
  address   = {Glyfada, Greece},
  abstract  = {In this paper we present a photometry-based approach to the digital documentation of cultural artifacts. Rather than representing an artifact as a geometric model with spatially varying reflectance properties, we instead propose directly representing the artifact in terms of its reflectance field - the manner in which it transforms light into images. The principal device employed in our technique is a computer-controlled lighting apparatus which quickly illuminates an artifact from an exhaustive set of incident illumination directions and a set of digital video cameras which record the artifact's appearance under these forms of illumination. From this database of recorded images, we compute linear combinations of the captured images to synthetically illuminate the object under arbitrary forms of complex incident illumination, correctly capturing the effects of specular reflection, subsurface scattering, self-shadowing, mutual illumination, and complex BRDF's often present in cultural artifacts. We also describe a computer application that allows users to realistically and interactively relight digitized artifacts.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lee, C. M.; Narayanan, Shrikanth; Pieraccin, R.
Recognition of Negative Emotions from the Speech Signal Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001), 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{lee_recognition_2001,
  title     = {Recognition of Negative Emotions from the Speech Signal},
  author    = {C. M. Lee and Shrikanth Narayanan and R. Pieraccin},
  url       = {http://ict.usc.edu/pubs/Recognition%20of%20Negative%20Emotions%20from%20the%20Speech%20Signal.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop (ASRU 2001)},
  abstract  = {This paper reports on methods for automatic classification of spoken utterances based on the emotional state of the speaker. The data set used for the analysis comes from a corpus of human- machine dialogs recorded from a commercial application deployed by SpeechWorks. Linear discriminant classification with Gaussian class-conditional probability distribution and k-nearest neighborhood methods are used to classify utterances into two basic emotion states, negative and non-negative. The features used by the classifiers are utterance-level statistics of the fundamental frequency and energy of the speech signal. To improve classification performance, two specific feature selection methods are used; namely, promising first selection and forward feature selection. Principal component analysis is used to reduce the dimensionality of the features while maximizing classification accuracy. Improvements obtained by feature selection and PCA are reported in this paper. We reported the results.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Tchou, Chris; Debevec, Paul
Light Stage 2.0 Proceedings Article
In: SIGGRAPH Technical Sketches, pp. 217, 2001.
Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_light_2001,
  title     = {Light Stage 2.0},
  author    = {Tim Hawkins and Jonathan Cohen and Chris Tchou and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Light%20Stage%202.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {SIGGRAPH Technical Sketches},
  pages     = {217},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olsen, Mari; Traum, David; Ess-Dykema, Carol Van; Weinberg, Amy
Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System Proceedings Article
In: Machine Translation Summit VIII, Santiago de Compostela, Spain, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{olsen_implicit_2001,
  title     = {Implicit Cues for Explicit Generation: Using Telicity as a Cue for Tense Structure in Chinese to English MT System},
  author    = {Mari Olsen and David Traum and Carol {Van Ess-Dykema} and Amy Weinberg},
  url       = {http://ict.usc.edu/pubs/Implicit%20Cues%20for%20Explicit%20Generation-%20Using%20Telicity%20as%20a%20Cue%20for%20Tense%20Structure%20in%20Chinese%20to%20English%20MT%20System.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Machine Translation Summit VIII},
  address   = {Santiago de Compostela, Spain},
  abstract  = {In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Tense information is crucial to providing accurate and fluent translations into English. Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is optional in Chinese and is not present in the majority of sentences. We report on a project that assesses the relative contribution of the lexical aspect features of (a)telicity reflected in the Lexical Conceptual Structure of the input text, versus more overt aspectual and adverbial markers of tense, to suggest tense structure in the English translation of a Chinese newspaper corpus. Incorporating this information allows a 20% to 35% boost in the accuracy of tense realization with the best accuracy rate of 92% on a corpus of Chinese articles.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Embedded High-Quality Multichannel Audio Coding Proceedings Article
In: Conference on Media Processors, Symposium on Electronic Imaging, San Jose, CA, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{yang_embedded_2001,
  title     = {Embedded High-Quality Multichannel Audio Coding},
  author    = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C. -C. Jay Kuo},
  url       = {http://ict.usc.edu/pubs/Embedded%20High-Quality%20Multichannel%20Audio%20Coding.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Conference on Media Processors, Symposium on Electronic Imaging},
  address   = {San Jose, CA},
  abstract  = {An embedded high-quality multi-channel audio coding algorithm is proposed in this research. The Karhunen-Loeve Transform (KLT) is applied to multichannel audio signals in the pre-processing stage to remove inter-channel redundancy. Then, after processing of several audio coding blocks, transformed coefficients are layered quantized and the bit stream is ordered according to their importance. The multichannel audio bit stream generated by the proposed algorithm has a fully progressive property, which is highly desirable for audio multicast applications in heterogeneous networks. Experimental results show that, compared with the MPEG Advanced Audio Coding (AAC) algorithm, the proposed algorithm achieves a better performance with both the objective MNR (Mask-to-Noise-Ratio) measurement and the subjective listening test at several different bit rates.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Waese, Jamie; Debevec, Paul
A Real Time High Dynamic Range Light Probe Proceedings Article
In: SIGGRAPH Technical Sketches, 2001.
Links | BibTeX | Tags: Graphics
@inproceedings{waese_real_2001,
  title     = {A Real Time High Dynamic Range Light Probe},
  author    = {Jamie Waese and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20Real%20Time%20High%20Dynamic%20Range%20Light%20Probe.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {SIGGRAPH Technical Sketches},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
A Cluster Centroid Method for Room Response Equalization at Multiple Locations Proceedings Article
In: IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics, pp. 55–58, New Platz, NY, 2001, ISBN: 0-7803-7126-7.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_cluster_2001,
  title     = {A Cluster Centroid Method for Room Response Equalization at Multiple Locations},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/A%20CLUSTER%20CENTROID%20METHOD%20FOR%20ROOM%20RESPONSE%20EQUALIZATION%20AT%20MULTIPLE%20LOCATIONS.pdf},
  isbn      = {0-7803-7126-7},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {IEEE Workshop on the Applications of Signal Processing to Audio and Acoustics},
  pages     = {55--58},
  address   = {New Platz, NY},
  abstract  = {In this paper we address the problem of simultaneous room response equalization for multiple listeners. Traditional approaches to this problem have used a single microphone at the listening position to measure impulse responses from a loudspeaker and then use an inverse filter to correct the frequency response. The problem with that approach is that it only works well for that one point and in most cases is not practical even for one listener with a typical ear spacing of 18 cm. It does not work at all for other listeners in the room, or if the listener changes positions even slightly. We propose a new approach that is based on the Fuzzy c-means clustering technique. We use this method to design equalization filters and demonstrate that we can achieve better equalization performance for several locations in the room simultaneously as compared to single point or simple averaging methods.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Srinivasamurthy, Naveen; Narayanan, Shrikanth; Ortega, Antonio
Use of Model Transformations for Distributed Speech Recognition Proceedings Article
In: 4th ISCA Tutorial and Research Workshop on Speech Synthesis, pp. 113–116, Sophia Antipolis, France, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{srinivasamurthy_use_2001,
  title     = {Use of Model Transformations for Distributed Speech Recognition},
  author    = {Naveen Srinivasamurthy and Shrikanth Narayanan and Antonio Ortega},
  url       = {http://ict.usc.edu/pubs/Use%20of%20Model%20Transformations%20for%20Distributed%20Speech%20Recognition.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {4th ISCA Tutorial and Research Workshop on Speech Synthesis},
  pages     = {113--116},
  address   = {Sophia Antipolis, France},
  abstract  = {Due to bandwidth limitations, the speech recognizer in distributed speech recognition (DSR) applications has to use encoded speech - either traditional speech encoding or speech encoding optimized for recognition. The penalty incurred in reducing the bitrate is degradation in speech recognition performance. The diversity of the applications using DSR implies that a variety of speech encoders can be used to compress speech. By treating the encoder variability as a mismatch we propose using model transformation to reduce the speech recognition performance degradation. The advantage of using model transformation is that only a single model set needs to be trained at the server, which can be adapted on the fly to the input speech data. We were able to reduce the word error rate by 61.9%, 63.3% and 56.3% for MELP, GSM and MFCC-encoded data, respectively, by using MAP adaptation, which shows the generality of our proposed scheme.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Damiano, Rossana; Traum, David
Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems Proceedings Article
In: NAACL 2001 Workshop on Adaptation in Dialogue Systems, 2001.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{damiano_anticipatory_2001,
  title     = {Anticipatory planning for decision-theoretic grounding and task advancement in mixed-initiative dialogue systems},
  author    = {Rossana Damiano and David Traum},
  url       = {http://ict.usc.edu/pubs/Anticipatory%20planning%20for%20decision-theoretic%20grounding%20and%20task%20advancement%20in%20mixed-initiative%20dialogue%20systems.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {NAACL 2001 Workshop on Adaptation in Dialogue Systems},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations Proceedings Article
In: Proceedings of 23rd Annual Conference of the Cognitive Science Society, Edinburgh, Scotland, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2001,
  title     = {Modeling the Interplay of Emotions and Plans in Multi-Agent Simulations},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20of%20Emotions%20and%20Plans%20in%20Multi-Agent%20Simulations.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {Proceedings of 23rd Annual Conference of the Cognitive Science Society},
  address   = {Edinburgh, Scotland},
  abstract  = {The goal of this research is to create general computational models of the interplay between affect, cognition and behavior. These models are being designed to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. We attempt to capture both the cognitive and behavioral aspects of emotion, circumscribed to the role emotions play in the performance of concrete physical tasks. We address how emotions arise from an evaluation of the relationship between environmental events and an agent's plans and goals, as well as the impact of emotions on behavior, in particular the impact on the physical expressions of emotional state through suitable choice of gestures and body language. The approach is illustrated within a virtual reality training environment.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yang, Dai; Ai, Hongmei; Kyriakakis, Chris; Kuo, C. -C. Jay
Adaptive Karhunen-Loeve Transform for Enhanced Multichannel Audio Coding Proceedings Article
In: SPIE, San Diego, CA, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{yang_adaptive_2001,
  title     = {Adaptive Karhunen-Loeve Transform for Enhanced Multichannel Audio Coding},
  author    = {Dai Yang and Hongmei Ai and Chris Kyriakakis and C. -C. Jay Kuo},
  url       = {http://ict.usc.edu/pubs/Adaptive%20Karhunen-Loeve%20Transform%20for%20Enhanced%20Multichannel%20Audio%20Coding.pdf},
  year      = {2001},
  date      = {2001-01-01},
  booktitle = {SPIE},
  address   = {San Diego, CA},
  abstract  = {A modified MPEG Advanced Audio Coding (AAC) scheme based on the Karhunen-Loeve transform (KLT) to remove inter-channel redundancy, which is called the MAACKL method, has been proposed in our previous work. However, a straightforward coding of elements of the KLT matrix generates about 240 bits per matrix for typical 5 channel audio contents. Such an overhead is too expensive so that it prevents MAACKL from updating KLT dynamically in a short period of time. In this research, we study the de-correlation efficiency of adaptive KLT as well as an efficient way to encode elements of the KLT matrix via vector quantization. The effect due to different quantization accuracy and adaptation period is examined carefully. It is demonstrated that with the smallest possible number of bits per matrix and a moderately long KLT adaptation time, the MAACKL algorithm can still generate a very good coding performance.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sadek, Ramy; Miraglia, Dave; Morie, Jacquelyn
3D Sound Design and Technology for the Sensory Environments Evaluations Project: Phase 1 Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 01.2001, 2001.
@techreport{sadek_3d_2001,
  title       = {3D Sound Design and Technology for the Sensory Environments Evaluations Project: Phase 1},
  author      = {Ramy Sadek and Dave Miraglia and Jacquelyn Morie},
  url         = {http://ict.usc.edu/pubs/ICT-TR-01-2001.pdf},
  year        = {2001},
  date        = {2001-01-01},
  number      = {ICT TR 01.2001},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
2000
Debevec, Paul; Hawkins, Tim; Tchou, Chris; Duiker, Haarm-Pieter; Sarokin, Westley
Acquiring the Reflectance Field of a Human Face Proceedings Article
In: SIGGRAPH, New Orleans, LA, 2000.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_acquiring_2000,
  title     = {Acquiring the Reflectance Field of a Human Face},
  author    = {Paul Debevec and Tim Hawkins and Chris Tchou and Haarm-Pieter Duiker and Westley Sarokin},
  url       = {http://ict.usc.edu/pubs/Acquiring%20the%20Re%EF%AC%82ectance%20Field%20of%20a%20Human%20Face.pdf},
  year      = {2000},
  date      = {2000-07-01},
  booktitle = {SIGGRAPH},
  address   = {New Orleans, LA},
  abstract  = {We present a method to acquire the reflectance field of a human face and use these measurements to render the face under arbitrary changes in lighting and viewpoint. We first acquire images of the face from a small set of viewpoints under a dense sampling of incident illumination directions using a light stage. We then construct a reflectance function image for each observed image pixel from its values over the space of illumination directions. From the reflectance functions, we can directly generate images of the face from the original viewpoints in any form of sampled or computed illumination. To change the viewpoint, we use a model of skin reflectance to estimate the appearance of the reflectance functions for novel viewpoints. We demonstrate the technique with synthetic renderings of a person's face under novel illumination and viewpoints.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Selective Signal Cancellation for Multiple Listener Audio Applications: An Information Theory Approach Proceedings Article
In: IEEE International Conference Multimedia and Expo, New York, NY, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_selective_2000,
  title     = {Selective Signal Cancellation for Multiple Listener Audio Applications: An Information Theory Approach},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/SELECTIVE%20SIGNAL%20CANCELLATION%20FOR%20MULTIPLE-LISTENER%20AUDIO%20APPLICATIONS-%20AN%20INFORMATION%20THEORY%20APPROACH.pdf},
  year      = {2000},
  date      = {2000-07-01},
  booktitle = {IEEE International Conference Multimedia and Expo},
  address   = {New York, NY},
  abstract  = {Selectively canceling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, teleconferencing, office, industrial and other applications. The traditional noise cancellation approach is impractical for such applications because it requires sensors that must be placed on the listeners. In this paper we propose an alternative method to minimize signal power in a given location and maximize signal power in another location of interest. A key advantage of this approach would be the need to eliminate sensors. We investigate the use of an information theoretic criterion known as mutual information to design filter coefficients that selectively cancel a signal in one audio channel, and transmit it in another (complementary) channel. Our results show an improvement in power gain at one location in the room relative to the other.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Human-like behavior, alas, demands human-like intellect Proceedings Article
In: Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents, Barcelona, Spain, 2000.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_human-like_2000,
  title     = {Human-like behavior, alas, demands human-like intellect},
  author    = {Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Human-like%20behavior%20alas%20demands%20human-like%20intellect.pdf},
  year      = {2000},
  date      = {2000-06-01},
  booktitle = {Agents 2000 Workshop on Achieving Human-like Behavior in Interactive Animated Agents},
  address   = {Barcelona, Spain},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Moutchtaris, Athanasios; Reveliotis, Panagiotis; Kyriakakis, Chris
Inverse Filter Design for Immersive Audio Rendering Over Loudspeakers Journal Article
In: IEEE Transactions on Multimedia, vol. 2, no. 2, pp. 77–87, 2000.
Abstract | Links | BibTeX | Tags:
@article{moutchtaris_inverse_2000,
  title     = {Inverse Filter Design for Immersive Audio Rendering Over Loudspeakers},
  author    = {Athanasios Moutchtaris and Panagiotis Reveliotis and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/Inverse%20Filter%20Design%20for%20Immersive%20Audio%20Rendering%20Over%20Loudspeakers.pdf},
  year      = {2000},
  date      = {2000-06-01},
  journal   = {IEEE Transactions on Multimedia},
  volume    = {2},
  number    = {2},
  pages     = {77--87},
  abstract  = {Immersive audio systems can be used to render virtual sound sources in three-dimensional (3-D) space around a listener. This is achieved by simulating the head-related transfer function (HRTF) amplitude and phase characteristics using digital filters. In this paper, we examine certain key signal processing considerations in spatial sound rendering over headphones and loudspeakers. We address the problem of crosstalk inherent in loudspeaker rendering and examine two methods for implementing crosstalk cancellation and loudspeaker frequency response inversion in real time. We demonstrate that it is possible to achieve crosstalk cancellation of 30 dB using both methods, but one of the two (the Fast RLS Transversal Filter Method) offers a significant advantage in terms of computational efficiency. Our analysis is easily extendable to nonsymmetric listening positions and moving listeners.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hill, Randall W.; Gratch, Jonathan; Rosenbloom, Paul
Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Social Simulation, Virtual Humans
@inproceedings{hill_flexible_2000,
  title     = {Flexible Group Behavior: Virtual Commanders for Synthetic Battlespaces},
  author    = {Randall W. Hill and Jonathan Gratch and Paul Rosenbloom},
  url       = {http://ict.usc.edu/pubs/Flexible%20Group%20Behavior-%20Virtual%20Commanders%20for%20Synthetic%20Battlespaces.pdf},
  year      = {2000},
  date      = {2000-06-01},
  booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
  address   = {Barcelona, Spain},
  abstract  = {This paper describes a project to develop autonomous commander agents for synthetic battlespaces. The commander agents plan missions, monitor their execution, and replan when necessary. To reason about the social aspects of group behavior, the commanders take various social stances that enable them to collaborate with friends, exercise or defer to authority, and thwart their foes. The purpose of this paper is to describe these capabilities and how they came to be through a series of lessons learned while developing autonomous agents for this domain.},
  keywords  = {CogArch, Cognitive Architecture, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Proceedings Article
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_how_2000,
  title     = {How Long Can an Agent Look Away From a Target?},
  author    = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
  year      = {2000},
  date      = {2000-05-01},
  booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
  abstract  = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no one theory for SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot, who visually tracks the location and direction of several vehicles that he cannot see simultaneously, must shift its visual field of view to scan the environment and to sense the situation involved. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. In SA, it is important for a virtual pilot having a limited visual field of view to gather more information from its environment and to choose appropriate actions to take in the environment without losing the target.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time Proceedings Article
In: Proceedings of ICME 2000, New York, NY, 2000.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_multiple_2000,
  title     = {A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time},
  author    = {Panayiotis G. Georgiou and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/A%20MULTIPLE%20INPUT%20SINGLE%20OUTPUT%20MODEL%20FOR%20RENDERING%20VIRTUAL%20SOUND%20SOURCES%20IN%20REAL%20TIME.pdf},
  year      = {2000},
  date      = {2000-01-01},
  booktitle = {Proceedings of ICME 2000},
  address   = {New York, NY},
  abstract  = {Accurate localization of sound in 3-D space is based on variations in the spectrum of sound sources. These variations arise mainly from reflection and diffraction effects caused by the pinnae and are described through a set of Head-Related Transfer Functions (HRTF’s) that are unique for each azimuth and elevation angle. A virtual sound source can be rendered in the desired location by filtering with the corresponding HRTF for each ear. Previous work on HRTF modeling has mainly focused on the methods that attempt to model each transfer function individually. These methods are generally computationally-complex and cannot be used for real-time spatial rendering of multiple moving sources. In this work we provide an alternative approach, which uses a multiple input single output state space system to create a combined model of the HRTF’s for all directions. This method exploits the similarities among the different HRTF’s to achieve a significant reduction in the model size with a minimum loss of accuracy.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
Émile: Marshalling Passions in Training and Education Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_emile_2000,
  title     = {Émile: Marshalling Passions in Training and Education},
  author    = {Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
  year      = {2000},
  date      = {2000-01-01},
  booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
  pages     = {325--332},
  address   = {Barcelona, Spain},
  abstract  = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes Émile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}