Publications
Search
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Building Trust in a Human-Robot Team with Automatically Generated Explanations Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{wang_building_2015,
  title     = {Building Trust in a Human-Robot Team with Automatically Generated Explanations},
  author    = {Wang, Ning and Pynadath, David V. and Hill, Susan G.},
  url       = {http://www.iitsecdocs.com/search},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, Florida},
  abstract  = {Technological advances offer the promise of robotic systems that work with people to form human-robot teams that are more capable than their individual members. Unfortunately, the increasing capability of such autonomous systems has often failed to increase the capability of the human-robot team. Studies have identified many causes underlying these failures, but one critical aspect of a successful human-machine interaction is trust. When robots are more suited than humans for a certain task, we want the humans to trust the robots to perform that task. When the robots are less suited, we want the humans to appropriately gauge the robots’ ability and have people perform the task manually. Failure to do so results in disuse of robots in the former case and misuse in the latter. Real-world case studies and laboratory experiments show that failures in both cases are common. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies show that explanations offered by an automated system can help maintain trust with the humans in case the system makes an error, indicating that the robot’s communication transparency can be an important factor in earning an appropriate level of trust. To study how robots can communicate their decisionmaking process to humans, we have designed an agent-based online test-bed that supports virtual simulation of domain-independent human-robot interaction. In the simulation, humans work together with virtual robots as a team. The test-bed allows researchers to conduct online human-subject studies and gain better understanding of how robot communication can improve human-robot team performance by fostering better trust relationships between humans and their robot teammates. In this paper, we describe the details of our design, and illustrate its operation with an example human-robot team reconnaissance task.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Merchant, Chirag
Toward Acquiring a Human Behavior Model of Competition vs. Cooperation Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{pynadath_toward_2015,
  title     = {Toward Acquiring a Human Behavior Model of Competition vs. Cooperation},
  author    = {Pynadath, David V. and Wang, Ning and Merchant, Chirag},
  url       = {http://www.iitsecdocs.com/search},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, Florida},
  abstract  = {One of the challenges in modeling human behavior is accurately capturing the conditions under which people will behave selfishly or selflessly. Researchers have been unable to craft purely cooperative (or competitive) scenarios without significant numbers of subjects displaying unintended selfish (or selfless) behavior (e.g., Rapoport & Chammah, 1965). In this work, rather than try to further isolate competitive vs. cooperative behavior, we instead construct an experimental setting that deliberately includes both, in a way that fits within an operational simulation model. Using PsychSim, a multiagent social simulation framework with both Theory of Mind and decision theory, we have implemented an online resource allocation game called “Team of Rivals”, where four players seek to defeat a common enemy. The players have individual pools of resources which they can allocate toward that common goal. In addition to their progress toward this common goal, the players also receive individual feedback, in terms of the number of resources they own and have won from the enemy. By giving the players both an explicit cooperative goal and implicit feedback on potential competitive goals, we give them room to behave anywhere on the spectrum between these two extremes. Furthermore, by moving away from the more common two-player laboratory settings (e.g., Prisoner’s Dilemma), we can observe differential behavior across the richer space of possible interpersonal relationships. We discuss the design of the game that allows us to observe and analyze these relationships from human behavior data acquired through this game. We then describe decision-theoretic agents that can simulate hypothesized variations on human behavior. Finally, we present results of a preliminary playtest of the testbed and discuss the gathered data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew S.
Creative Help: A Story Writing Assistant Book Section
In: Interactive Storytelling, vol. 9445, pp. 81–92, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27036-4.
@incollection{roemmele_creative_2015,
  title     = {Creative Help: A Story Writing Assistant},
  author    = {Roemmele, Melissa and Gordon, Andrew S.},
  url       = {http://link.springer.com/10.1007/978-3-319-27036-4_8},
  doi       = {10.1007/978-3-319-27036-4_8},
  isbn      = {978-3-319-27036-4},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interactive Storytelling},
  series    = {Lecture Notes in Computer Science},
  volume    = {9445},
  pages     = {81--92},
  publisher = {Springer International Publishing},
  address   = {Copenhagen, Denmark},
  abstract  = {We present Creative Help, an application that helps writers by generating suggestions for the next sentence in a story as it is being written. Users can modify or delete suggestions according to their own vision of the unfolding narrative. The application tracks users' changes to suggestions in order to measure their perceived helpfulness to the story, with fewer edits indicating more helpful suggestions. We demonstrate how the edit distance between a suggestion and its resulting modification can be used to comparatively evaluate different models for generating suggestions. We describe a generation model that uses case-based reasoning to find relevant suggestions from a large corpus of stories. The application shows that this model generates suggestions that are more helpful than randomly selected suggestions at a level of marginal statistical significance. By giving users control over the generated content, Creative Help provides a new opportunity in open-domain interactive storytelling.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
McAlinden, Ryan; Suma, Evan; Grechkin, Timofey; Enloe, Michael
Procedural Reconstruction of Simulation Terrain Using Drones Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{mcalinden_procedural_2015,
  title     = {Procedural Reconstruction of Simulation Terrain Using Drones},
  author    = {McAlinden, Ryan and Suma, Evan and Grechkin, Timofey and Enloe, Michael},
  url       = {http://www.iitsecdocs.com/search},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, Florida},
  abstract  = {Photogrammetric techniques for constructing 3D virtual environments have previously been plagued by expensive equipment, imprecise and visually unappealing results. However, with the introduction of low-cost, off-the-shelf (OTS) unmanned aerial systems (UAS), lighter and capable cameras, and more efficient software techniques for reconstruction, the modeling and simulation (M&S) community now has available to it new types of virtual assets that are suited for modern-day games and simulations. This paper presents an approach for fully autonomously collecting, processing, storing and rendering highly-detailed geo-specific terrain data using these OTS techniques and methods. We detail the types of equipment used, the flight parameters, the processing and reconstruction pipeline, and finally the results of using the dataset in a game/simulation engine. A key objective of the research is procedurally segmenting the terrain into usable features that the engine can interpret – i.e. distinguishing between roads, buildings, vegetation, etc. This allows the simulation core to assign attributes related to physics, lighting, collision cylinders and navigation meshes that not only support basic rendering of the model but introduce interaction with it. The results of this research are framed in the context of a new paradigm for geospatial collection, analysis and simulation. Specifically, the next generation of M&S systems will need to integrate environmental representations that have higher detail and richer metadata while ensuring a balance between performance and usability.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Park, Sunghyun; Morency, Louis-Philippe; Scherer, Stefan
Combining Two Perspectives on Classifying Multimodal Data for Recognizing Speaker Traits Proceedings Article
In: Proceedings of the 2015 ACM on International Conference on Multimodal Interaction, pp. 7–14, ACM Press, Seattle, Washington, 2015, ISBN: 978-1-4503-3912-4.
@inproceedings{chatterjee_combining_2015,
  title     = {Combining Two Perspectives on Classifying Multimodal Data for Recognizing Speaker Traits},
  author    = {Chatterjee, Moitreya and Park, Sunghyun and Morency, Louis-Philippe and Scherer, Stefan},
  url       = {http://dl.acm.org/citation.cfm?doid=2818346.2820747},
  doi       = {10.1145/2818346.2820747},
  isbn      = {978-1-4503-3912-4},
  year      = {2015},
  date      = {2015-11-01},
  booktitle = {Proceedings of the 2015 ACM on International Conference on Multimodal Interaction},
  pages     = {7--14},
  publisher = {ACM Press},
  address   = {Seattle, Washington},
  abstract  = {Human communication involves conveying messages both through verbal and non-verbal channels (facial expression, gestures, prosody, etc.). Nonetheless, the task of learning these patterns for a computer by combining cues from multiple modalities is challenging because it requires effective representation of the signals and also taking into consideration the complex interactions between them. From the machine learning perspective this presents a two-fold challenge: a) Modeling the intermodal variations and dependencies; b) Representing the data using an apt number of features, such that the necessary patterns are captured but at the same time allaying concerns such as over-fitting. In this work we attempt to address these aspects of multimodal recognition, in the context of recognizing two essential speaker traits, namely passion and credibility of online movie reviewers. We propose a novel ensemble classification approach that combines two different perspectives on classifying multimodal data. Each of these perspectives attempts to independently address the two-fold challenge. In the first, we combine the features from multiple modalities but assume inter-modality conditional independence. In the other one, we explicitly capture the correlation between the modalities but in a space of few dimensions and explore a novel clustering based kernel similarity approach for recognition. Additionally, this work investigates a recent technique for encoding text data that captures semantic similarity of verbal content and preserves word-ordering. The experimental results on a recent public dataset shows significant improvement of our approach over multiple baselines. Finally, we also analyze the most discriminative elements of a speaker's non-verbal behavior that contribute to his/her perceived credibility/passionateness.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Andrew; Casas, Dan; Shapiro, Ari
Avatar Reshaping and Automatic Rigging Using a Deformable Model Proceedings Article
In: Proceedings of the 8th ACM SIGGRAPH Conference on Motion in Games, pp. 57–64, ACM Press, Paris, France, 2015, ISBN: 978-1-4503-3991-9.
@inproceedings{feng_avatar_2015,
  title     = {Avatar Reshaping and Automatic Rigging Using a Deformable Model},
  author    = {Feng, Andrew and Casas, Dan and Shapiro, Ari},
  url       = {http://dl.acm.org/citation.cfm?doid=2822013.2822017},
  doi       = {10.1145/2822013.2822017},
  isbn      = {978-1-4503-3991-9},
  year      = {2015},
  date      = {2015-11-01},
  booktitle = {Proceedings of the 8th ACM SIGGRAPH Conference on Motion in Games},
  pages     = {57--64},
  publisher = {ACM Press},
  address   = {Paris, France},
  abstract  = {3D scans of human figures have become widely available through online marketplaces and have become relatively easy to acquire using commodity scanning hardware. In addition to static uses of such 3D models, such as 3D printed figurines or rendered 3D still imagery, there are numerous uses for an animated 3D character that uses such 3D scan data. In order to effectively use such models as dynamic 3D characters, the models must be properly rigged before they are animated. In this work, we demonstrate a method to automatically rig a 3D mesh by matching a set of morphable models against the 3D scan. Once the morphable model has been matched against the 3D scan, the skeleton position and skinning attributes are then copied, resulting in a skinning and rigging that is similar in quality to the original hand-rigged model. In addition, the use of a morphable model allows us to reshape and resize the 3D scan according to approximate human proportions. Thus, a human 3D scan can be modified to be taller, shorter, fatter or skinnier. Such manipulations of the 3D scan are useful both for social science research, as well as for visualization for applications such as fitness, body image, plastic surgery and the like.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Papaefthymiou, Margarita; Feng, Andrew; Shapiro, Ari; Papagiannakis, George
A fast and robust pipeline for populating mobile AR scenes with gamified virtual characters Proceedings Article
In: SIGGRAPH Asia 2015, pp. 1–8, ACM Press, Kobe, Japan, 2015, ISBN: 978-1-4503-3928-5.
@inproceedings{papaefthymiou_fast_2015,
  title     = {A fast and robust pipeline for populating mobile AR scenes with gamified virtual characters},
  author    = {Papaefthymiou, Margarita and Feng, Andrew and Shapiro, Ari and Papagiannakis, George},
  url       = {http://dl.acm.org/citation.cfm?doid=2818427.2818463},
  doi       = {10.1145/2818427.2818463},
  isbn      = {978-1-4503-3928-5},
  year      = {2015},
  date      = {2015-11-01},
  booktitle = {SIGGRAPH Asia 2015},
  pages     = {1--8},
  publisher = {ACM Press},
  address   = {Kobe, Japan},
  abstract  = {In this work we present a complete methodology for robust authoring of AR virtual characters powered from a versatile character animation framework (Smartbody), using only mobile devices. We can author, fully augment with life-size, animated, geometrically accurately registered virtual characters into any open space in less than 1 minute with only modern smartphones or tablets and then automatically revive this augmentation for subsequent activations from the same spot, in under a few seconds. Also, we handle efficiently scene authoring rotations of the AR objects using Geometric Algebra rotors in order to extract higher quality visual results. Moreover, we have implemented a mobile version of the global illumination for real-time Precomputed Radiance Transfer algorithm for diffuse shadowed characters in real-time, using High Dynamic Range (HDR) environment maps integrated in our opensource OpenGL Geometric Application (glGA) framework. Effective character interaction plays fundamental role in attaining high level of believability and makes the AR application more attractive and immersive based on the SmartBody framework.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance Proceedings Article
In: Eurographics Symposium on Virtual Environments (2015), pp. 93–100, The Eurographics Association, Kyoto, Japan, 2015, ISBN: 978-3-905674-84-2.
@inproceedings{azmandian_physical_2015,
  title     = {Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance},
  author    = {Azmandian, Mahdi and Grechkin, Timofey and Bolas, Mark and Suma, Evan},
  url       = {https://diglib.eg.org/handle/10.2312/13833},
  doi       = {10.2312/egve.20151315},
  isbn      = {978-3-905674-84-2},
  year      = {2015},
  date      = {2015-10-01},
  booktitle = {Eurographics Symposium on Virtual Environments (2015)},
  pages     = {93--100},
  publisher = {The Eurographics Association},
  address   = {Kyoto, Japan},
  abstract  = {Redirected walking provides a compelling solution to explore large virtual environments in a natural way. However, research literature provides few guidelines regarding trade-offs involved in selecting size and layout for physical tracked space. We designed a rigorously controlled benchmarking framework and conducted two simulated user experiments to systematically investigate how the total area and dimensions of the tracked space affect performance of steer-to-center and steer-to-orbit algorithms. The results indicate that minimum viable size of physical tracked space for these redirected walking algorithms is approximately 6m × 6m with performance continuously improving in larger tracked spaces. At the same time, no “optimal” tracked space size can guarantee the absence of contacts with the boundary. We also found that square tracked spaces enabled best overall performance with steer-to-center algorithm also performing well in moderately elongated rectangular spaces. Finally, we demonstrate that introducing translation gains can provide a useful boost in performance, particularly when physical space is constrained. We conclude with the discussion of potential applications of our benchmarking toolkit to other problems related to performance of redirected walking platforms.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Leuski, Anton
A Novel Statistical Approach for Image and Video Retrieval and Its Adaption for Active Learning Book Section
In: Proceedings of the 23rd ACM International Conference on Multimedia, pp. 935–938, ACM, Brisbane, Australia, 2015, ISBN: 978-1-4503-3459-4.
@incollection{chatterjee_novel_2015,
  title     = {A Novel Statistical Approach for Image and Video Retrieval and Its Adaption for Active Learning},
  author    = {Chatterjee, Moitreya and Leuski, Anton},
  url       = {http://dl.acm.org/citation.cfm?id=2806368},
  isbn      = {978-1-4503-3459-4},
  year      = {2015},
  date      = {2015-10-01},
  booktitle = {Proceedings of the 23rd ACM International Conference on Multimedia},
  internal-note = {booktitle previously duplicated the paper title; corrected to ACM MM 2015 based on ISBN/pages/location — verify against the ACM DL record},
  pages     = {935--938},
  publisher = {ACM},
  address   = {Brisbane, Australia},
  abstract  = {The ever expanding multimedia content (such as images and videos), especially on the web, necessitates effective text query-based search (or retrieval) systems. Popular approaches for addressing this issue, use the query-likelihood model which fails to capture the user's information needs. In this work therefore, we explore a new ranking approach in the context of image and video retrieval from text queries. Our approach assumes two separate underlying distributions for query and the document respectively. We then, determine the extent of similarity between these two statistical distributions for the task of ranking. Furthermore we extend our approach, using Active Learning techniques, to address the question of obtaining a good performance without requiring a fully labeled training dataset. This is done by taking Sample Uncertainty, Density and Diversity into account. Our experiments on the popular TRECVID corpus and the open, relatively small-sized USC SmartBody corpus show that we are almost at-par or sometimes better than multiple state-of-the-art baselines.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Kang, Sin-Hwa; Feng, Andrew W.; Leuski, Anton; Casas, Dan; Shapiro, Ari
The Effect of An Animated Virtual Character on Mobile Chat Interactions Book Section
In: Proceedings of the 3rd International Conference on Human-Agent Interaction, pp. 105–112, ACM, Daegu, Korea, 2015, ISBN: 978-1-4503-3527-0.
@incollection{kang_effect_2015,
  title     = {The Effect of An Animated Virtual Character on Mobile Chat Interactions},
  author    = {Kang, Sin-Hwa and Feng, Andrew W. and Leuski, Anton and Casas, Dan and Shapiro, Ari},
  url       = {http://dl.acm.org/citation.cfm?id=2814957},
  isbn      = {978-1-4503-3527-0},
  year      = {2015},
  date      = {2015-10-01},
  booktitle = {Proceedings of the 3rd International Conference on Human-Agent Interaction},
  pages     = {105--112},
  publisher = {ACM},
  address   = {Daegu, Korea},
  abstract  = {This study explores presentation techniques for a 3D animated chat-based virtual human that communicates engagingly with users. Interactions with the virtual human occur via a smartphone outside of the lab in natural settings. Our work compares the responses of users who interact with no image or a static image of a virtual character as opposed to the animated visage of a virtual human capable of displaying appropriate nonverbal behavior. We further investigate users’ responses to the animated character’s gaze aversion which displayed the character’s act of looking away from users and was presented as a listening behavior. The findings of our study demonstrate that people tend to engage in conversation more by talking for a longer amount of time when they interact with a 3D animated virtual human that averts its gaze, compared to an animated virtual human that does not avert its gaze, a static image of a virtual character, or an audio-only interface.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Brilman, Maarten; Scherer, Stefan
A Multimodal Predictive Model of Successful Debaters or How I Learned to Sway Votes Proceedings Article
In: Proceedings of ACM Multimedia 2015, pp. 149–158, ACM, Brisbane, Australia, 2015, ISBN: 978-1-4503-3459-4.
@inproceedings{brilman_multimodal_2015,
  title     = {A Multimodal Predictive Model of Successful Debaters or How I Learned to Sway Votes},
  author    = {Brilman, Maarten and Scherer, Stefan},
  url       = {http://dl.acm.org/citation.cfm?id=2806245},
  doi       = {10.1145/2733373.2806245},
  isbn      = {978-1-4503-3459-4},
  year      = {2015},
  date      = {2015-10-01},
  booktitle = {Proceedings of ACM Multimedia 2015},
  pages     = {149--158},
  publisher = {ACM},
  address   = {Brisbane, Australia},
  abstract  = {Interpersonal skills such as public speaking are essential assets for a large variety of professions and in everyday life. The ability to communicate in social environments often greatly influences a person's career development, can help resolve conflict, gain the upper hand in negotiations, or sway the public opinion. We focus our investigations on a special form of public speaking, namely public debates of socioeconomic issues that affect us all. In particular, we analyze performances of expert debaters recorded through the Intelligence Squared U.S. (IQ2US) organization. IQ2US collects high-quality audiovisual recordings of these debates and publishes them online free of charge. We extract audiovisual nonverbal behavior descriptors, including facial expressions, voice quality characteristics, and surface level linguistic characteristics. Within our experiments we investigate if it is possible to automatically predict if a debater or his/her team are going to sway the most votes after the debate using multimodal machine learning and fusion approaches. We identify unimodal nonverbal behaviors that characterize successful debaters and our investigations reveal that multimodal machine learning approaches can reliably predict which individual (∼75% accuracy) or team (85% accuracy) is going to win the most votes in the debate. We created a database consisting of over 30 debates with four speakers per debate suitable for public speaking skill analysis and plan to make this database publicly available for the research community.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Yuqiong; Lucas, Gale; Khooshabeh, Peter; Melo, Celso; Gratch, Jonathan
Effects of emotional expressions on persuasion Journal Article
In: Social Influence, vol. 10, no. 4, pp. 236–249, 2015, ISSN: 1553-4510, 1553-4529.
@article{wang_effects_2015,
  title    = {Effects of emotional expressions on persuasion},
  author   = {Wang, Yuqiong and Lucas, Gale and Khooshabeh, Peter and Melo, Celso and Gratch, Jonathan},
  url      = {http://www.tandfonline.com/doi/full/10.1080/15534510.2015.1081856},
  doi      = {10.1080/15534510.2015.1081856},
  issn     = {1553-4510, 1553-4529},
  year     = {2015},
  date     = {2015-10-01},
  journal  = {Social Influence},
  volume   = {10},
  number   = {4},
  pages    = {236--249},
  abstract = {This paper investigates how expressions of emotion affect persuasiveness when the expresser and the recipient have different levels of power. The first study demonstrates that when the recipient overpowers the expresser, emotional expressions reduce persuasion. A second study reveals that power and perceived appropriateness of emotional expressions independently moderate the effect of emotional expressions. Emotional expressions hamper persuasion when the recipient overpowers the expresser, or when the emotional expressions are considered inappropriate.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Rizzo, Albert "Skip"; Shilling, Russell; Forbell, Eric; Scherer, Stefan; Gratch, Jonathan; Morency, Louis-Philippe
Autonomous Virtual Human Agents for Healthcare Information Support and Clinical Interviewing Book Section
In: Artificial Intelligence in Behavioral and Mental Health Care, pp. 53–79, Elsevier, Inc., Philadelphia, PA, 2015, ISBN: 978-0-12-420248-1.
@incollection{rizzo_autonomous_2015,
  title     = {Autonomous Virtual Human Agents for Healthcare Information Support and Clinical Interviewing},
  author    = {Rizzo, Albert "Skip" and Shilling, Russell and Forbell, Eric and Scherer, Stefan and Gratch, Jonathan and Morency, Louis-Philippe},
  url       = {http://www.sciencedirect.com/science/article/pii/B9780124202481000039},
  isbn      = {978-0-12-420248-1},
  year      = {2015},
  date      = {2015-10-01},
  booktitle = {Artificial Intelligence in Behavioral and Mental Health Care},
  internal-note = {booktitle was missing (required for @incollection); added from the publisher record matching this ISBN — verify against ScienceDirect},
  pages     = {53--79},
  publisher = {Elsevier, Inc.},
  address   = {Philadelphia, PA},
  abstract  = {Over the last 20 years, a virtual revolution has taken place in the use of Virtual Reality simulation technology for clinical purposes. Recent shifts in the social and scientific landscape have now set the stage for the next major movement in Clinical Virtual Reality with the “birth” of intelligent virtual human (VH) agents. Seminal research and development has appeared in the creation of highly interactive, artificially intelligent and natural language capable VHs that can engage real human users in a credible fashion. VHs can now be designed to perceive and act in a virtual world, engage in face-to-face spoken dialogues, and in some cases they are capable of exhibiting human-like emotional reactions. This chapter will detail our applications in this area where a virtual human can provide private online healthcare information and support (i.e., SimCoach) and where a VH can serve the role as a clinical interviewer (i.e., SimSensei).},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Kang, Sin-Hwa; Krum, David M.; Phan, Thai; Bolas, Mark
"Hi, It's Me Again!": Virtual Coaches over Mobile Video Proceedings Article
In: Proceedings of the 3rd International Conference on Human-Agent Interaction, pp. 183–186, ACM, Daegu, Korea, 2015, ISBN: 978-1-4503-3527-0.
@inproceedings{kang_hi_2015,
  title     = {"Hi, It's Me Again!": Virtual Coaches over Mobile Video},
  author    = {Kang, Sin-Hwa and Krum, David M. and Phan, Thai and Bolas, Mark},
  url       = {http://dl.acm.org/citation.cfm?id=2814970},
  isbn      = {978-1-4503-3527-0},
  year      = {2015},
  date      = {2015-10-01},
  booktitle = {Proceedings of the 3rd International Conference on Human-Agent Interaction},
  pages     = {183--186},
  publisher = {ACM},
  address   = {Daegu, Korea},
  abstract  = {We believe that virtual humans presented over video chat services, such as Skype via smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. We hypothesize that the context of a smartphone communication channel, i.e. how a virtual human is presented within a smartphone app, and indeed, the nature of that app, can profoundly affect how a real human perceives the virtual human. We have built an apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we are examining effective designs and social implications of virtual humans that interact over mobile video. The current study examines a relationship involving repeated counseling-style interactions with a virtual human, leveraging the virtual human’s ability to call and interact with a real human on multiple occasions over a period of time. The results and implications of this preliminary study suggest that repeated interactions may improve perceived social characteristics of the virtual human.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Khooshabeh, Peter; Scherer, Stefan; Oiumette, Brett; Ryan, William S.; Lance, Brent J.; Gratch, Jonathan
Computational-based behavior analysis and peripheral psychophysiology Journal Article
In: Advances in Computational Psychophysiology, pp. 34–36, 2015.
@article{khooshabeh_computational-based_2015,
  title    = {Computational-based behavior analysis and peripheral psychophysiology},
  author   = {Khooshabeh, Peter and Scherer, Stefan and Oiumette, Brett and Ryan, William S. and Lance, Brent J. and Gratch, Jonathan},
  internal-note = {author surname "Oiumette" may be a transposition of "Ouimette" — verify against the published supplement},
  url      = {http://www.sciencemag.org/sites/default/files/custom-publishing/documents/CP_Supplement_Final_100215.pdf},
  year     = {2015},
  date     = {2015-10-01},
  journal  = {Advances in Computational Psychophysiology},
  pages    = {34--36},
  abstract = {Computational-based behavior analysis aims to automatically identify, characterize, model, and synthesize multimodal nonverbal behavior within both human–machine as well as machine-mediated human–human interaction. It uses state-of-the-art machine learning algorithms to track human nonverbal and verbal information, such as facial expressions, gestures, and posture, as well as what and how a person speaks. The emerging technology from this field of research is relevant for a wide range of interactive and social applications, including health care and education. The characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or posttraumatic stress, could have significant benefits for treatments and the overall efficiency of the health care system.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Paetzel, Maike; Manuvinakurike, Ramesh; DeVault, David
"So, which one is it?" The effect of alternative incremental architectures in a high-performance game-playing agent Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 77 – 86, Prague, Czech Republic, 2015.
@inproceedings{paetzel_so_2015,
title = {``So, which one is it?'' The effect of alternative incremental architectures in a high-performance game-playing agent},
author = {Maike Paetzel and Ramesh Manuvinakurike and David DeVault},
url = {http://ict.usc.edu/pubs/So,%20which%20one%20is%20it%20-%20The%20effect%20of%20alternative%20incremental%20architectures%20in%20a%20high-performance%20game-playing%20agent.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of SIGDIAL 2015},
pages = {77--86},
address = {Prague, Czech Republic},
abstract = {This paper introduces Eve, a highperformance agent that plays a fast-paced image matching game in a spoken dialogue with a human partner. The agent can be optimized and operated in three different modes of incremental speech processing that optionally include incremental speech recognition, language understanding, and dialogue policies. We present our framework for training and evaluating the agent’s dialogue policies. In a user study involving 125 human participants, we evaluate three incremental architectures against each other and also compare their performance to human-human gameplay. Our study reveals that the most fully incremental agent achieves game scores that are comparable to those achieved in human-human gameplay, are higher than those achieved by partially and nonincremental versions, and are accompanied by improved user perceptions of efficiency, understanding of speech, and naturalness of interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; Evangelista, Edgar; New, Raymond; Campbell, Julia; Richmond, Todd; McGroarty, Christopher; Vogt, Brian
Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping Proceedings Article
In: Proceedings of the 2015 Simulation Interoperability Workshop, Orlando, FL, 2015.
@inproceedings{spicer_innovation_2015,
title = {Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping},
author = {Ryan Spicer and Edgar Evangelista and Raymond New and Julia Campbell and Todd Richmond and Christopher McGroarty and Brian Vogt},
url = {http://ict.usc.edu/pubs/Innovation%20and%20Rapid%20Evolutionary%20Design%20by%20Virtual%20Doing-Understanding%20Early%20Synthetic.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of the 2015 Simulation Interoperability Workshop},
address = {Orlando, FL},
abstract = {The proliferation and maturation of tools supporting virtual environments combined with emerging immersive capabilities (e.g. Oculus Rift and other head mounted displays) point towards the ability to take nascent ideas and realize them in engaging ways through an Early Synthetic Prototyping (ESP) system. In effect, “bend electrons before bending metal,” enabling Soldier (end-user) feedback early in the design process, while fostering an atmosphere of collaboration and innovation. Simulation has been used in a variety of ways for concept, design, and testing, but current methods do not put the user into the system in ways that provide deep feedback and enable a dialogue between Warfighter and Engineer (as well as other stakeholders) that can inform design. This paper will discuss how the process of ESP is teased out by using iterative rapid virtual prototyping based on an initial ESP schema, resulting in a rather organic design process – Innovation and Rapid Evolutionary Design by Virtual Doing. By employing canonical use cases, working through the draft schema allows the system to help design itself and inform the process evolution. This type of self-referential meta-design becomes increasingly powerful and relevant given the ability to rapidly create assets, capabilities and environments that immerse developers, stakeholders, and end users early and often in the process. Specific examples of using rapid virtual prototyping for teasing out the design and implications/applications of ESP will be presented, walking through the evolution of both schema and prototypes with specific use cases. In addition, this paper will cover more generalized concepts, approaches, analytics, and lessons-learned as well as implications for innovation throughout research, development, and industry.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli; Nichols, Jeffrey
GOAALLL!: Using Sentiment in the World Cup to Explore Theories of Emotion Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{gratch_goaalll_2015,
title = {{GOAALLL}!: Using Sentiment in the {World Cup} to Explore Theories of Emotion},
author = {Jonathan Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler and Jeffrey Nichols},
url = {http://ict.usc.edu/pubs/GOAALLL!%20Using%20Sentiment%20in%20the%20World%20Cup%20to%20Explore%20Theories%20of%20Emotion.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Sporting events evoke strong emotions amongst fans and thus act as natural laboratories to explore emotions and how they unfold in the wild. Computational tools, such as sentiment analysis, provide new ways to examine such dynamic emotional processes. In this article we use sentiment analysis to examine tweets posted during 2014 World Cup. Such analysis gives insight into how people respond to highly emotional events, and how these emotions are shaped by contextual factors, such as prior expectations, and how these emotions change as events unfold overtime. Here we report on some preliminary analysis of a World Cup twitter corpus using sentiment analysis techniques. We show these tools can give new insights into existing theories of what makes a sporting match exciting. This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion. We also discuss some challenges that such data present for existing sentiment analysis techniques and discuss future analysis.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale; Gratch, Jonathan; Scherer, Stefan; Boberg, Jill; Stratou, Giota
Towards an Affective Interface for Assessment of Psychological Distress Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
@inproceedings{lucas_towards_2015,
author = {Gale Lucas and Jonathan Gratch and Stefan Scherer and Jill Boberg and Giota Stratou},
title = {Towards an Affective Interface for Assessment of Psychological Distress},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
year = {2015},
date = {2015-09-01},
url = {http://ict.usc.edu/pubs/Towards%20an%20Affective%20Interface%20for%20Assessment%20of%20Psychological%20Distress.pdf},
abstract = {Even with the rise in use of TeleMedicine for health care and mental health, research suggests that clinicians may have difficulty reading nonverbal cues in computer-mediated situations. However, the recent progress in tracking affective markers (i.e., displays of emotional expressions on face and in voice) has opened the door to new clinical applications that might help health care providers better read nonverbal behaviors when employing TeleMedicine. For example, an interface that automatically quantified affective markers could assist clinicians in their assessment of and treatment for psychological distress (i.e., symptoms of depression and PTSD). To move towards this prospect, we will show that clinicians’ judgments of these nonverbal affective markers (e.g., smile, frown, eye contact, tense voice) could be informed by such technology. The results of our evaluation suggest that clinicians’ ratings of nonverbal affective markers are less predictive of psychological distress than automatically quantified affective markers. Because such quantifications are more strongly associated with psychological distress than clinician ratings of these same nonverbal behaviors, an affective interface providing quantifications of nonverbal affective markers could potentially improve assessment of psychological distress.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Honig, Wolfgang; Milanes, Christina; Scaria, Lisa; Phan, Thai; Bolas, Mark; Ayanian, Nora
Mixed Reality for Robotics Proceedings Article
In: 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5382 – 5387, IEEE, Hamburg, Germany, 2015.
@inproceedings{honig_mixed_2015,
title = {Mixed Reality for Robotics},
author = {Wolfgang Honig and Christina Milanes and Lisa Scaria and Thai Phan and Mark Bolas and Nora Ayanian},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7354138&tag=1},
doi = {10.1109/IROS.2015.7354138},
year = {2015},
date = {2015-09-01},
booktitle = {2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {5382--5387},
publisher = {IEEE},
address = {Hamburg, Germany},
abstract = {Mixed Reality can be a valuable tool for research and development in robotics. In this work, we refine the definition of Mixed Reality to accommodate seamless interaction between physical and virtual objects in any number of physical or virtual environments. In particular, we show that Mixed Reality can reduce the gap between simulation and implementation by enabling the prototyping of algorithms on a combination of physical and virtual objects, including robots, sensors, and humans. Robots can be enhanced with additional virtual capabilities, or can interact with humans without sharing physical space. We demonstrate Mixed Reality with three representative experiments, each of which highlights the advantages of our approach. We also provide a testbed for Mixed Reality with three different virtual robotics environments in combination with the Crazyflie 2.0 quadcopter.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2004
Hartholt, Arno; Muller, T. J.
Interaction on Emotions Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 02.2004, 2004.
Abstract | Links | BibTeX | Tags:
@techreport{hartholt_interaction_2004,
author = {Arno Hartholt and T. J. Muller},
title = {Interaction on Emotions},
institution = {University of Southern California Institute for Creative Technologies},
number = {ICT TR 02.2004},
address = {Marina del Rey, CA},
year = {2004},
date = {2004-01-01},
url = {http://ict.usc.edu/pubs/Interaction%20on%20emotions.pdf},
abstract = {This report describes the addition of an emotion dialogue to the Mission Rehearsal Exercise (MRE) system. The goal of the MRE system is to provide an immersive learning environment for army officer recruits. The user can engage in conversation with several intelligent agents in order to accomplish the goals within a certain scenario. Although these agents did already posses emotions, they were unable to express them verbally. A question - answer dialogue has been implemented to this purpose. The implementation makes use of proposition states for modelling knowledge, keyword scanning for natural language understanding and templates for natural language generation. The system is implemented using Soar and TCL. An agent can understand emotion related questions in four different domains, type, intensity, state, and the combination of responsible-agent and blameworthiness. Some limitations arise due to the techniques used and to the relative short time frame in which the assignment was to be executed. Main issues are that the existing natural language understanding and generation modules could not be fully used, that very little context about the conversation is available and that the emotion states simplify the emotional state of an agent. These limitations and other thoughts give rise to the following recommendations for further work: * Make full use of references. * Use coping strategies for generating agent's utterances. * Use focus mechanisms for generating agent's utterances. * Extend known utterances. * Use NLU and NLG module. * Use emotion dialogue and states to influence emotions. * Fix known bugs.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gordon, Andrew S.
The Representation of Planning Strategies Journal Article
In: Artificial Intelligence, vol. 153, pp. 287–305, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@article{gordon_representation_2004,
title = {The Representation of Planning Strategies},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Representation%20of%20Planning%20Strategies.PDF},
year = {2004},
date = {2004-01-01},
journal = {Artificial Intelligence},
volume = {153},
pages = {287--305},
abstract = {An analysis of strategies, recognizable abstract patterns of planned behavior, highlights the difference between the assumptions that people make about their own planning processes and the representational commitments made in current automated planning systems. This article describes a project to collect and represent strategies on a large scale to identify the representational components of our commonsense understanding of intentional action. Three hundred and seventy-two strategies were collected from ten different planning domains. Each was represented in a pre-formal manner designed to reveal the assumptions that these strategies make concerning the human planning process. The contents of these representations, consisting of nearly one thousand unique concepts, were then collected and organized into forty-eight groups that outline the representational requirements of strategic planning systems.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {article}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narayanan_transonics_2003,
title = {Transonics: A Speech to Speech System for English-Persian Interactions},
author = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2003},
date = {2003-12-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
address = {U.S. Virgin Islands},
abstract = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
Image-Based Techniques for Digitizing Environments and Artifacts Proceedings Article
In: 4th International Conference on 3-D Digital Imaging and Modeling (3DIM), 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_image-based_2003,
author = {Paul Debevec},
title = {Image-Based Techniques for Digitizing Environments and Artifacts},
booktitle = {4th International Conference on 3-D Digital Imaging and Modeling (3DIM)},
year = {2003},
date = {2003-10-01},
url = {http://ict.usc.edu/pubs/Image-Based%20Techniques%20for%20Digitizing%20Environments%20and%20Artifacts.pdf},
abstract = {This paper presents an overview of techniques for generating photoreal computer graphics models of real-world places and objects. Our group's early efforts in modeling scenes involved the development of Facade, an interactive photogrammetric modeling system that uses geometric primitives to model the scene, and projective texture mapping to produce the scene appearance properties. Subsequent work has produced techniques to model the incident illumination within scenes, which we have shown to be useful for realistically adding computer-generated objects to image-based models. More recently, our work has focussed on recovering lighting-independent models of scenes and objects, capturing how each point on an object reflects light. Our latest work combines three-dimensional range scans, digital photographs, and incident illumination measurements to produce lighting-independent models of complex objects and environments.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Proceedings Article
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_functions_2003,
title = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
author = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
url = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
year = {2003},
date = {2003-09-01},
booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
address = {Saarbruecken Germany},
abstract = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker, (e.g., military radio communications). However, explicit guidelines, such as provided by the military are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Itti, Laurent; Dhavale, Nitin; Pighin, Frédéric
Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention Proceedings Article
In: Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology, San Diego, CA, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{itti_realistic_2003,
author = {Laurent Itti and Nitin Dhavale and Frédéric Pighin},
title = {Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention},
booktitle = {Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology},
address = {San Diego, CA},
year = {2003},
date = {2003-08-01},
doi = {10.1117/12.512618},
url = {http://ict.usc.edu/pubs/Realistic%20Avatar%20Eye%20and%20Head%20Animation%20Using%20a%20Neurobiological%20Model%20of%20Visual%20Attention.pdf},
abstract = {We describe a neurobiological model of visual attention and eye/head movements in primates, and its application to the automatic animation of a realistic virtual human head watching an unconstrained variety of visual inputs. The bottom-up (image-based) attention model is based on the known neurophysiology of visual processing along the occipito-parietal pathway of the primate brain, while the eye/head movement model is derived from recordings in freely behaving Rhesus monkeys. The system is successful at autonomously saccading towards and tracking salient targets in a variety of video clips, including synthetic stimuli, real outdoors scenes and gaming console outputs. The resulting virtual human eye/head animation yields realistic rendering of the simulation results, both suggesting applicability of this approach to avatar animation and reinforcing the plausibility of the neural model.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Douglas, Jay; Gordon, Andrew S.; Pighin, Frédéric; Velson, Martin
Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters Proceedings Article
In: Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference, Acapulco, Mexico, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hill_guided_2003,
author = {Randall W. Hill and Jay Douglas and Andrew S. Gordon and Frédéric Pighin and Martin Velson},
title = {Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters},
booktitle = {Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference},
address = {Acapulco, Mexico},
year = {2003},
date = {2003-08-01},
url = {http://ict.usc.edu/pubs/Guided%20Conversations%20about%20Leadership-%20Mentoring%20with%20Movies%20and%20Interactive%20Characters.pdf},
abstract = {Think Like a Commander - Excellence in Leadership (TLAC-XL) is an application designed for learning leadership skills both from the experiences of others and through a structured dialogue about issues raised in a vignette. The participant watches a movie, interacts with a synthetic mentor and interviews characters in the story. The goal is to enable leaders to learn the human dimensions of leadership, addressing a gap in the training tools currently available to the U.S. Army. The TLAC-XL application employs a number of Artificial Intelligence technologies, including the use of a coordination architecture, a machine learning approach to natural language processing, and an algorithm for the automated animation of rendered human faces.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Kazemzadeh, Abe; Nair, Anish; Petrova, Milena
Recognizing Expressions of Commonsense Psychology in English Text Proceedings Article
In: Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL), Sapporo, Japan, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_recognizing_2003,
title = {Recognizing Expressions of Commonsense Psychology in {English} Text},
author = {Andrew S. Gordon and Abe Kazemzadeh and Anish Nair and Milena Petrova},
url = {http://ict.usc.edu/pubs/Recognizing%20Expressions%20of%20Commonsense%20Psychology%20in%20English%20Text.PDF},
year = {2003},
date = {2003-07-01},
booktitle = {Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL)},
address = {Sapporo, Japan},
abstract = {Many applications of natural language processing technologies involve analyzing texts that concern the psychological states and processes of people, including their beliefs, goals, predictions, explanations, and plans. In this paper, we describe our efforts to create a robust, large-scale lexical-semantic resource for the recognition and classification of expressions of commonsense psychology in English Text. We achieve high levels of precision and recall by hand-authoring sets of local grammars for commonsense psychology concepts, and show that this approach can achieve classification performance greater than that obtained by using machine learning techniques. We demonstrate the utility of this resource for large-scale corpus analysis by identifying references to adversarial and competitive goal in political speeches throughout U.S. history.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Nair, Anish
Literary Evidence for the Cultural Development of a Theory of Mind Proceedings Article
In: Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci), Boston, MA, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_literary_2003,
title = {Literary Evidence for the Cultural Development of a {Theory of Mind}},
author = {Andrew S. Gordon and Anish Nair},
url = {http://ict.usc.edu/pubs/Literary%20Evidence%20for%20the%20Cultural%20Development%20of%20a%20Theory%20of%20Mind.PDF},
year = {2003},
date = {2003-07-01},
booktitle = {Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci)},
address = {Boston, MA},
abstract = {The term Theory of Mind is used within the cognitive sciences to refer to the abilities that people have to reason about their own mental states and the mental states of others. An important question is whether these abilities are culturally acquired or innate to our species. This paper outlines the argument that the mental models that serve as the basis for Theory of Mind abilities are the product of cultural development. To support this thesis, we present evidence gathered from the large-scale automated analysis of text corpora. We show that the Freudian conception of a subconscious desire is a relatively modern addition to our culturally shared Theory of Mind, as evidenced by a shift in the way these ideas appeared in 19th and 20th century English language novels.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
van Lent, Michael; Hill, Randall W.; McAlinden, Ryan; Brobst, Paul
2002 Defense Modeling and Simulation Office (DMSO) Laboratory for Human Behavior Model Interchange Standards Technical Report
no. AFRL-HE-WP-TP-2007-0008, 2003.
Abstract | Links | BibTeX | Tags:
@techreport{van_lent_2002_2003,
title = {2002 {Defense Modeling and Simulation Office} ({DMSO}) Laboratory for Human Behavior Model Interchange Standards},
author = {Michael van Lent and Randall W. Hill and Ryan McAlinden and Paul Brobst},
url = {http://ict.usc.edu/pubs/2002%20Defense%20Modeling%20and%20Simulation%20Office%20(DMSO)%20Laboratory%20for%20Human%20Behavior%20Model%20Interchange%20Standards.pdf},
year = {2003},
date = {2003-07-01},
number = {AFRL-HE-WP-TP-2007-0008},
abstract = {This report describes the effort to address the following research objective: "To begin to define, prototype, and demonstrate an interchange standard among Human Behavior Modeling (HEM) -related models in the Department of Defense (DoD), Industry, Academia, and other Government simulations by establishing a Laboratory for the Study of Human Behavior Representation Interchange Standard." With experience, expertise, and technologies of the commercial computer game industry, the academic research community, and DoD simulation developers, the Institute for Creative Technologies discusses their design and implementation for a prototype HBM interface standard and also describes their demonstration of that standard in a game-based simulation environment that combines HBM models from the entertainment industry and academic researchers.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gratch, Jonathan; Marsella, Stacy C.
Fight the Way You Train: The Role and Limits of Emotions in Training for Combat Journal Article
In: Brown Journal of World Affairs, vol. X, pp. 63–76, 2003.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_fight_2003,
title = {Fight the Way You Train: The Role and Limits of Emotions in Training for Combat},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Fight%20the%20Way%20You%20Train-The%20Role%20and%20Limits%20of%20Emotions%20in%20Training%20for%20Combat.pdf},
year = {2003},
date = {2003-06-01},
journal = {Brown Journal of World Affairs},
volume = {X},
pages = {63--76},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Proceedings Article
In: Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hill_virtual_2003,
title = {Virtual Humans in the {Mission Rehearsal Exercise} System},
author = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
year = {2003},
date = {2003-06-01},
booktitle = {Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents)},
abstract = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Iuppa, Nicholas
Experience Management Using Storyline Adaptation Strategies Proceedings Article
In: Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment, Darmstadt, Germany, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_experience_2003,
author = {Andrew S. Gordon and Nicholas Iuppa},
title = {Experience Management Using Storyline Adaptation Strategies},
booktitle = {Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment},
address = {Darmstadt, Germany},
year = {2003},
date = {2003-03-01},
url = {http://ict.usc.edu/pubs/Experience%20Management%20Using%20Storyline%20Adaptation%20Strategies.PDF},
abstract = {The central problem of creating interactive drama is structuring a media experience for participants such that a good story is presented while enabling a high degree of meaningful interactivity. This paper presents a new approach to interactive drama, where pre-authored storylines are made interactive by adapting them at run-time by applying strategies that react to unexpected user behavior. The approach, called Experience Management, relies heavily on the explication of a broad range of adaptation strategies and a means of selecting which strategy is most appropriate given a particular story context. We describe a formal approach to storyline representation to enable the selection of applicable strategies, and a strategy formalization that allows for storyline modification. Finally, we discuss the application of this approach in the context of a story-based training system for military leadership skills, and the direction for continuing research.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Hobbs, Jerry R.
Coverage and Competency in Formal Theories: A Commonsense Theory of Memory Proceedings Article
In: Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning, Stanford University, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_coverage_2003,
title = {Coverage and Competency in Formal Theories: A Commonsense Theory of Memory},
author = {Andrew S. Gordon and Jerry R. Hobbs},
url = {http://ict.usc.edu/pubs/Coverage%20and%20Competency%20in%20Formal%20Theories-%20A%20Commonsense%20Theory%20of%20Memory.PDF},
year = {2003},
date = {2003-03-01},
booktitle = {Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning},
address = {Stanford University},
abstract = {The utility of formal theories of commonsense reasoning will depend both on their competency in solving problems and on their conceptual coverage. We argue that the problems of coverage and competency can be decoupled and solved with different methods for a given commonsense domain. We describe a methodology for identifying the coverage requirements of theories through the large-scale analysis of planning strategies, with further refinements made by collecting and categorizing instances of natural language expressions pertaining to the domain. We demonstrate the effectiveness of this methodology in identifying the representational coverage requirements of theories of the commonsense psychology of human memory. We then apply traditional methods of formalization to produce a formal first-order theory of commonsense memory with a high degree of competency and coverage.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Proceedings Article
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_nl_2003,
title = {NL Generation for Virtual Humans in a Complex Social Environment},
author = {David Traum and Michael Fleischman and Eduard Hovy},
url = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
year = {2003},
date = {2003-03-01},
booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
pages = {151–158},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Proceedings Article
In: International Workshop on Computational Semantics, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_semantics_2003,
title = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Workshop on Computational Semantics},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Uhrmacher, Adelinde; Swartout, William
Agent-Oriented Simulation Journal Article
In: Applied System Simulation, pp. 215–239, 2003.
Abstract | Links | BibTeX | Tags:
@article{uhrmacher_agent-oriented_2003,
title = {Agent-Oriented Simulation},
author = {Adelinde Uhrmacher and William Swartout},
url = {http://link.springer.com/chapter/10.1007/978-1-4419-9218-5_10},
year = {2003},
date = {2003-01-01},
journal = {Applied System Simulation},
pages = {215–239},
abstract = {Metaphors play a key role in computer science and engineering. Agents bring the notion of locality of information (as in object-oriented programming) together with locality of intent or purpose. The relation between multi-agent and simulation systems is multi-facetted. Simulation systems are used to evaluate software agents in virtual dynamic environments. Agents become part of the model design, if autonomous entities in general, and human or social actors in particular shall be modeled. A couple of research projects shall illuminate some of these facets.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Joshi, Pushkar; Tien, Wen C.; Desbrun, Mathieu; Pighin, Frédéric
Learning Controls for Blend Shape Based Realistic Facial Animation Proceedings Article
In: Breen, D.; Lin, M. (Ed.): Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{joshi_learning_2003,
title = {Learning Controls for Blend Shape Based Realistic Facial Animation},
author = {Pushkar Joshi and Wen C. Tien and Mathieu Desbrun and Frédéric Pighin},
editor = {D. Breen and M. Lin},
url = {http://ict.usc.edu/pubs/Learning%20Controls%20for%20Blend%20Shape%20Based%20Realistic%20Facial%20Animation.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation},
abstract = {Blend shape animation is the method of choice for keyframe facial animation: a set of blend shapes (key facial expressions) are used to define a linear space of facial expressions. However, in order to capture a significant range of complexity of human expressions, blend shapes need to be segmented into smaller regions where key idiosyncrasies of the face being animated are present. Performing this segmentation by hand requires skill and a lot of time. In this paper, we propose an automatic, physically-motivated segmentation that learns the controls and parameters directly from the set of blend shapes. We show the usefulness and efficiency of this technique for both motion-capture animation and keyframing. We also provide a rendering algorithm to enhance the visual realism of a blend shape model.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2003,
title = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {313–320},
address = {Melbourne, Australia},
abstract = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2003,
title = {The Social Credit Assignment Problem},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
volume = {2792},
number = {ICT TR 02 2003},
address = {Kloster Irsee, Germany},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gardner, Andrew; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Linear Light Source Reflectometry Proceedings Article
In: ACM Transactions on Graphics, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{gardner_linear_2003,
title = {Linear Light Source Reflectometry},
author = {Andrew Gardner and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Linear%20Light%20Source%20Reflectometry.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {ACM Transactions on Graphics},
abstract = {This paper presents a technique for estimating the spatially-varying reflectance properties of a surface based on its appearance during a single pass of a linear light source. By using a linear light rather than a point light source as the illuminant, we are able to reliably observe and estimate the diffuse color, specular color, and specular roughness of each point of the surface. The reflectometry apparatus we use is simple and inexpensive to build, requiring a single direction of motion for the light source and a fixed camera viewpoint. Our model fitting technique first renders a reflectance table of how diffuse and specular reflectance lobes would appear under moving linear light source illumination. Then, for each pixel we compare its series of intensity values to the tabulated reflectance lobes to determine which reflectance model parameters most closely produce the observed reflectance values. Using two passes of the linear light source at different angles, we can also estimate per-pixel surface normals as well as the reflectance parameters. Additionally our system records a per-pixel height map for the object and estimates its per-pixel translucency. We produce real-time renderings of the captured objects using a custom hardware shading algorithm. We apply the technique to a test object exhibiting a variety of materials as well as to an illuminated manuscript with gold lettering. To demonstrate the technique's accuracy, we compare renderings of the captured models to real photographs of the original objects.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Neumann, Ulrich; Piepol, Diane; Swartout, William
FlatWorld: Combining Hollywood Set-Design Techniques with VR Journal Article
In: IEEE Computer Graphics and Applications, no. January/February, 2003.
@article{pair_flatworld_2003,
title = {FlatWorld: Combining Hollywood Set-Design Techniques with VR},
author = {Jarrell Pair and Ulrich Neumann and Diane Piepol and William Swartout},
editor = {Lawrence Rosenblum and Michael Macedonia},
url = {http://ict.usc.edu/pubs/FlatWorld-%20Combining%20Hollywood%20Set-Design%20Techniques%20with%20VR.pdf},
year = {2003},
date = {2003-01-01},
journal = {IEEE Computer Graphics and Applications},
number = {January/February},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Proceedings Article
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_automating_2003,
title = {Automating After Action Review: Attributing Blame or Credit in Team Training},
author = {Jonathan Gratch and Wenji Mao},
url = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
address = {Scottsdale, AZ},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem (Extended Version) Technical Report
University of Southern California Institute for Creative Technologies Kloster Irsee, Germany, no. ICT TR 02 2003, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_social_2003-1,
title = {The Social Credit Assignment Problem (Extended Version)},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT%20TR%2002%202003.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
volume = {2792},
number = {ICT TR 02 2003},
address = {Kloster Irsee, Germany},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Traum, David; Larsson, Staffan
The Information State Approach to Dialogue Management Book Section
In: Current and New Directions in Discourse and Dialogue, pp. 325–353, 2003.
Links | BibTeX | Tags: Virtual Humans
@incollection{traum_information_2003,
title = {The Information State Approach to Dialogue Management},
author = {David Traum and Staffan Larsson},
url = {http://ict.usc.edu/pubs/The%20Information%20State%20Approach%20to%20Dialogue%20Management.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Current and New Directions in Discourse and Dialogue},
pages = {325–353},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Cao, Yong; Faloutsos, Petros; Pighin, Frédéric
Unsupervised Learning for Speech Motion Editing Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_unsupervised_2003,
title = {Unsupervised Learning for Speech Motion Editing},
author = {Yong Cao and Petros Faloutsos and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Unsupervised%20Learning%20for%20Speech%20Motion%20Editing.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
abstract = {We present a new method for editing speech related facial motions. Our method uses an unsupervised learning technique, Independent Component Analysis (ICA), to extract a set of meaningful parameters without any annotation of the data. With ICA, we are able to solve a blind source separation problem and describe the original data as a linear combination of two sources. One source captures content (speech) and the other captures style (emotion). By manipulating the independent components we can edit the motions in intuitive ways.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shapiro, Ari; Pighin, Frédéric
Hybrid Control For Interactive Character Animation Proceedings Article
In: Proceedings of the 11th Pacific Conference on Computer Graphics and Applications, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{shapiro_hybrid_2003,
title = {Hybrid Control For Interactive Character Animation},
author = {Ari Shapiro and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Hybrid%20Control%20For%20Interactive%20Character%20Animation.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications},
abstract = {We implement a framework for animating interactive characters by combining kinematic animation with physical simulation. The combination of animation techniques allows the characters to exploit the advantages of each technique. For example, characters can perform natural-looking kinematic gaits and react dynamically to unexpected situations. Kinematic techniques such as those based on motion capture data can create very natural-looking animation. However, motion capture based techniques are not suitable for modeling the complex interactions between dynamically interacting characters. Physical simulation, on the other hand, is well suited for such tasks. Our work develops kinematic and dynamic controllers and transition methods between the two control methods for interactive character animation. In addition, we utilize the motion graph technique to develop complex kinematic animation from shorter motion clips as a method of kinematic control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh
The Gestalt of Virtual Environments Proceedings Article
In: International Workshop on Presence, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_gestalt_2003,
title = {The Gestalt of Virtual Environments},
author = {Jacquelyn Morie and Josh Williams},
url = {http://ict.usc.edu/pubs/The%20Gestalt%20of%20Virtual%20Environments.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Workshop on Presence},
abstract = {The majority of research in the field of virtual reality to date has focused on increasing the fidelity of the environments created and trying to determine the quality of the participant experience. Efforts have been made to quantify such aspects, especially in regards to visuals and sound, and to a lesser extent to the user experience. Recent thinking has tended towards the assumption that ever-greater fidelity would ensure a better user experience. However, such emphasis on photo-realism and audio-realism does not take into account the collective results of our multimodal sensory inputs with their intertwined effects. Our design philosophy for the creation of virtual environments attempts to replicate the human experience, and asks the question: Is there an underlying fidelity of feels-real through which the quality of the participant experience could be improved?},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_negotiation_2003,
title = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
author = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {441–448},
address = {Melbourne, Australia},
abstract = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81–127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and asso- ciated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correct- ness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
title = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
author = {Bilyana Martinovski and David Traum},
url = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
address = {Switzerland},
abstract = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Unger, J.; Wenger, Andreas; Hawkins, Tim; Gardner, Andrew; Debevec, Paul
Capturing and Rendering With Incident Light Fields Proceedings Article
In: Proceedings of the 14th Eurographics workshop on Rendering, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{unger_capturing_2003,
title = {Capturing and Rendering With Incident Light Fields},
author = {J. Unger and Andreas Wenger and Tim Hawkins and Andrew Gardner and Paul Debevec},
url = {http://ict.usc.edu/pubs/Capturing%20and%20Rendering%20With%20Incident%20Light%20Fields.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 14th Eurographics workshop on Rendering},
abstract = {This paper presents a process for capturing spatially and directionally varying illumination from a real-world scene and using this lighting to illuminate computer-generated objects. We use two devices for capturing such illumination. In the first we photograph an array of mirrored spheres in high dynamic range to capture the spatially varying illumination. In the second, we obtain higher resolution data by capturing images with a high dynamic range omnidirectional camera as it traverses across a plane. For both methods we apply the light field technique to extrapolate the incident illumination to a volume. We render computer-generated objects as illuminated by this captured illumination using a custom shader within an existing global illumination rendering system. To demonstrate our technique we capture several spatially-varying lighting environments with spotlights, shadows, and dappled lighting and use them to illuminate synthetic scenes. We also show comparisons to real objects under the same illumination.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Moore, Benjamin
QuBit Documentation Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2003, 2003.
@techreport{moore_qubit_2003,
title = {QuBit Documentation},
author = {Benjamin Moore},
url = {http://ict.usc.edu/pubs/QuBit%20Documentation.pdf},
year = {2003},
date = {2003-01-01},
number = {ICT TR 01.2003},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
2002
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of Spatial Averaging Equalization Methods: A Statistical Approach Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002-1,
title = {Robustness of Spatial Averaging Equalization Methods: A Statistical Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Spatial%20Averaging%20Equalization%20Methods-%20A%20Statistical%20Approach.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems & Computers},
address = {Pacific Grove, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of room responses. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to performance of spatial averaging based equalization. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Perceptual Multiple Location Equalization with Clustering Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_perceptual_2002,
title = {Perceptual Multiple Location Equalization with Clustering},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Perceptual%20Multiple%20Location%20Equalization%20with%20Clustering.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems & Computers},
address = {Pacific Grove, CA},
abstract = {Typically, room equalization techniques do not focus on designing filters that equalize the room transfer functions on perceptually relevant spectral features. In this paper we address the problem of room equalization for multiple listeners, simultaneously, using a perceptually designed equalization filter based on pattern recognition techniques. Some features of the proposed filter are, its ability to perform simultaneous equalization at multiple locations, a reduced order, and a psychoacoustically motivated design. In summary, the simultaneous multiple location equalization, using a pattern recognition method, is performed over perceptually relevant spectral components derived from the auditory filtering mechanism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Hilmes, Philip; Kyriakakis, Chris
Robustness of Multiple Listener Equalization With Magnitude Response Averaging Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002,
title = {Robustness of Multiple Listener Equalization With Magnitude Response Averaging},
author = {Sunil Bharitkar and Philip Hilmes and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Multiple%20Listener%20Equalization%20With%20Magnitude%20Response%20Averaging.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of magnitude responses at locations of interest. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to a performance metric of equalization for magnitude response averaging. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_alternative_2002,
title = {An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/An%20Alternative%20Model%20for%20Sound%20Signals%20Encountered%20in%20Reverberant%20Environments%3b%20Robust%20Maximum%20Likelihood%20Localization%20and%20Parameter%20Estimation%20Based%20on%20a%20Sub-Gaussian%20Model.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {In this paper we investigate an alternative to the Gaussian density for modeling signals encountered in audio environments. The observation that sound signals are impulsive in nature, combined with the reverberation effects commonly encountered in audio, motivates the use of the Sub-Gaussian density. The new Sub-Gaussian statistical model and the separable solution of its Maximum Likelihood estimator are derived. These are used in an array scenario to demonstrate with both simulations and two different microphone arrays the achievable performance gains. The simulations exhibit the robustness of the sub-Gaussian based method while the real world experiments reveal a significant performance gain, supporting the claim that the sub-Gaussian model is better suited for sound signals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, W. Lewis; Narayanan, Shrikanth; Whitney, Richard; Das, Rajat; Labore, Catherine
Limited Domain Synthesis of Expressive Military Speech for Animated Characters Proceedings Article
In: IEEE 2002 Workshop on Speech Synthesis, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{johnson_limited_2002,
title = {Limited Domain Synthesis of Expressive Military Speech for Animated Characters},
author = {W. Lewis Johnson and Shrikanth Narayanan and Richard Whitney and Rajat Das and Catherine Labore},
url = {http://ict.usc.edu/pubs/Limited%20Domain%20Synthesis%20of%20Expressive%20Military%20Speech%20for%20Animated%20Characters.pdf},
year = {2002},
date = {2002-09-01},
booktitle = {IEEE 2002 Workshop on Speech Synthesis},
abstract = {Text-to-speech synthesis can play an important role in interactive education and training applications, as voices for animated agents. Such agents need high-quality voices capable of expressing intent and emotion. This paper presents preliminary results in an effort aimed at synthesizing expressive military speech for training applications. Such speech has acoustic and prosodic characteristics that can differ markedly from ordinary conversational speech. A limited domain synthesis approach is used employing samples of expressive speech, classified according to speaking style. The resulting synthesizer was tested both in isolation and in the context of a virtual reality training scenario with animated characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
The Theory of Mind in Strategy Representations Proceedings Article
In: Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, George Mason University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_theory_2002,
title = {The Theory of Mind in Strategy Representations},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Theory%20of%20Mind%20in%20Strategy%20Representations.PDF},
year = {2002},
date = {2002-08-01},
booktitle = {Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci)},
publisher = {Lawrence Erlbaum Associates},
address = {George Mason University},
abstract = {Many scientific fields continue to explore cognition related to Theory of Mind abilities, where people reason about the mental states of themselves and others. Experimental and theoretical approaches to this problem have largely avoided issues concerning the contents of representations employed in this class of reasoning. In this paper, we describe a new approach to the investigation of representations related to Theory of Mind abilities that is based on the analysis of commonsense strategies. We argue that because the mental representations of strategies must include concepts of mental states and processes, the large-scale analysis of strategies can be informative of the representational scope of Theory of Mind abilities. The results of an analysis of this sort are presented as a description of thirty representational areas that organize the breadth of Theory of Mind concepts. Implications for Theory Theories and Simulation Theories of Theory of Mind reasoning are discussed.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Angros, Richard Jr.; Johnson, W. Lewis; Rickel, Jeff; Scholer, Andrew
Learning Domain Knowledge for Teaching Procedural Skills Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{angros_learning_2002,
title = {Learning Domain Knowledge for Teaching Procedural Skills},
author = {Richard Jr. Angros and W. Lewis Johnson and Jeff Rickel and Andrew Scholer},
url = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Skills.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
abstract = {This paper describes a method for acquiring procedural knowledge for use by pedagogical agents in interactive simulation-based learning environments. Such agents need to be able to adapt their behavior to the changing conditions of the simulated world, and respond appropriately in mixed-initiative interactions with learners. This requires a good understanding of the goals and causal dependencies in the procedures being taught. Our method, inspired by human tutorial dialog, combines direct specification, demonstration, and experimentation. The human instructor demonstrates the skill being taught, while the agent observes the effects of the procedure on the simulated world. The agent then autonomously experiments with the procedure, making modifications to it, in order to understand the role of each step in the procedure. At various points the instructor can provide clarifications, and modify the developing procedural description as needed. This method is realized in a system called Diligent, which acquires procedural knowledge for the STEVE animated pedagogical agent.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_embodied_2002,
title = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
author = {David Traum and Jeff Rickel},
url = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kovar, Lucas; Gleicher, Michael; Pighin, Frédéric
Motion Graphs Proceedings Article
In: Proceedings of SIGGRAPH '02, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{kovar_motion_2002,
title = {Motion Graphs},
author = {Lucas Kovar and Michael Gleicher and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Motion%20Graphs.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {Proceedings of SIGGRAPH '02},
address = {San Antonio, TX},
abstract = {In this paper we present a novel method for creating realistic, controllable motion. Given a corpus of motion capture data, we automatically construct a directed graph called a motion graph that encapsulates connections among the database. The motion graph consists both of pieces of original motion and automatically generated transitions. Motion can be generated simply by building walks on the graph. We present a general framework for extracting particular graph walks that meet a user's specifications. We then show how this framework can be applied to the specific problem of generating different styles of locomotion along arbitrary paths.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Waese, Jamie; Hawkins, Tim
A Lighting Reproduction Approach to Live-Action Compositing Proceedings Article
In: SIGGRAPH 2002, pp. 547–556, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_lighting_2002,
title = {A Lighting Reproduction Approach to Live-Action Compositing},
author = {Paul Debevec and Andreas Wenger and Chris Tchou and Andrew Gardner and Jamie Waese and Tim Hawkins},
url = {http://ict.usc.edu/pubs/A%20Lighting%20Reproduction%20Approach%20to%20Live-Action%20Compositing.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {SIGGRAPH 2002},
pages = {547--556},
address = {San Antonio, TX},
abstract = {We describe a process for compositing a live performance of an actor into a virtual set wherein the actor is consistently illuminated by the virtual environment. The Light Stage used in this work is a two-meter sphere of inward-pointing RGB light emitting diodes focused on the actor, where each light can be set to an arbitrary color and intensity to replicate a real-world or virtual lighting environment. We implement a digital two-camera infrared matting system to composite the actor into the background plate of the environment without affecting the visible-spectrum illumination on the actor. The color response of the system is calibrated to produce correct color renditions of the actor as illuminated by the environment. We demonstrate moving-camera composites of actors into real-world environments and virtual sets such that the actor is properly illuminated by the environment into which they are composited.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Proceedings Article
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2002,
title = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
year = {2002},
date = {2002-06-01},
booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
address = {Orlando, FL},
abstract = {Recognizing and managing emotion in oneself and in those under one's command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Lent, Michael
Virtual Humans as Participants vs. Virtual Humans as Actors Proceedings Article
In: AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_virtual_2002,
title = {Virtual Humans as Participants vs. Virtual Humans as Actors},
author = {Andrew S. Gordon and Michael Lent},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20as%20Participants%20vs%20Virtual%20Humans%20as%20Actors.PDF},
year = {2002},
date = {2002-03-01},
booktitle = {AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
address = {Stanford University},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Enabling and recognizing strategic play in strategy games: Lessons from Sun Tzu Proceedings Article
In: The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_enabling_2002,
title = {Enabling and recognizing strategic play in strategy games: Lessons from Sun Tzu},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Enabling%20and%20recognizing%20strategic%20play%20in%20strategy%20games-%20Lessons%20from%20Sun%20Tzu.PDF},
year = {2002},
date = {2002-03-01},
booktitle = {The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
address = {Stanford University},
abstract = {The interactive entertainment genre of the strategy game entertains users by allowing them to engage in strategic play, which should encourage game designers to devote development efforts toward facilitating users that wish to employ commonsense strategies, and to recognize and react to specific user strategies during game play. This paper attempts to facilitate these development efforts by identifying and analyzing 43 strategies from Sun Tzu's The Art of War, which are broadly applicable across games in the strategy game genre. For each strategy, a set of specific actions are identified that should be provided to users to enable their execution, along with generalized recognition rules that can facilitate the design of entertaining responses to users' strategic behavior. Consideration of how the enabling actions could be incorporated into an existing strategy game is provided.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Fleischman, Michael; Hovy, Eduard
Emotional Variation in Speech-Based Natural Language Generation Proceedings Article
In: International Natural Language Generation Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{fleischman_emotional_2002,
title = {Emotional Variation in Speech-Based Natural Language Generation},
author = {Michael Fleischman and Eduard Hovy},
url = {http://ict.usc.edu/pubs/Emotional%20Variation%20in%20Speech-Based%20Natural%20Language%20Generation.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Natural Language Generation Conference},
abstract = {We present a framework for handling emotional variations in a speech-based natural language system for use in the MRE virtual training environment. The system is a first step toward addressing issues in emotion-based modeling of verbal communicative behavior. We cast the problem of emotional generation as a distance minimization task, in which the system chooses between multiple valid realizations for a given input based on the emotional distance of each realization from the speaker's attitude toward that input.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
A Tutorial on Image-Based Lighting Journal Article
In: IEEE Computer Graphics and Applications, 2002.
Links | BibTeX | Tags: Graphics
@article{debevec_tutorial_2002,
title = {A Tutorial on Image-Based Lighting},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Image-Based%20Lighting.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Computer Graphics and Applications},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Hill, Randall W.; Han, Changhee; Lent, Michael
Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments Proceedings Article
In: Proceedings of 14th Innovative Applications of Artificial Intelligence Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_applying_2002,
title = {Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments},
author = {Randall W. Hill and Changhee Han and Michael Lent},
url = {http://ict.usc.edu/pubs/Applying%20Perceptually%20Driven%20Cognitive%20Mapping%20To%20Virtual%20Urban%20Environments.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Proceedings of 14th Innovative Applications of Artificial Intelligence Conference},
abstract = {This paper describes a method for building a cognitive map of a virtual urban environment. Our routines enable virtual humans to map their environment using a realistic model of perception. We based our implementation on a computational framework proposed by Yeap and Jefferies (Yeap & Jefferies 1999) for representing a local environment as a structure called an Absolute Space Representation (ASR). Their algorithms compute and update ASRs from a 2-1/2D sketch of the local environment, and then connect the ASRs together to form a raw cognitive map. Our work extends the framework developed by Yeap and Jefferies in three important ways. First, we implemented the framework in a virtual training environment, the Mission Rehearsal Exercise (Swartout et al. 2001). Second, we describe a method for acquiring a 2-1/2D sketch in a virtual world, a step omitted from their framework, but which is essential for computing an ASR. Third, we extend the ASR algorithm to map regions that are partially visible through exits of the local space. Together, the implementation of the ASR algorithm along with our extensions will be useful in a wide variety of applications involving virtual humans and agents who need to perceive and reason about spatial concepts in urban environments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_ideas_2002,
title = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}