Publications
Search
Hawkins, Tim; Einarsson, Per; Debevec, Paul
A Dual Light Stage Proceedings Article
In: Dutré, Philip; Bala, Kavita (Ed.): Eurographics Symposium on Rendering, Konstanz, Germany, 2005.
@inproceedings{hawkins_dual_2005,
  title     = {A Dual Light Stage},
  author    = {Tim Hawkins and Per Einarsson and Paul Debevec},
  editor    = {Philip Dutré and Kavita Bala},
  url       = {http://ict.usc.edu/pubs/A%20Dual%20Light%20Stage.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Eurographics Symposium on Rendering},
  address   = {Konstanz, Germany},
  abstract  = {We present a technique for capturing high-resolution 4D reflectance fields using the reciprocity property of light transport. In our technique we place the object inside a diffuse spherical shell and scan a laser across its surface. For each incident ray, the object scatters a pattern of light onto the inner surface of the sphere, and we photograph the resulting radiance from the sphere's interior using a camera with a fisheye lens. Because of reciprocity, the image of the inside of the sphere corresponds to the reflectance function of the surface point illuminated by the laser, that is, the color that point would appear to a camera along the laser ray when the object is lit from each direction on the surface of the sphere. The measured reflectance functions allow the object to be photorealistically rendered from the laser's viewpoint under arbitrary directional illumination conditions. Since each captured reflectance function is a high-resolution image, our data reproduces sharp specular reflections and self-shadowing more accurately than previous approaches. We demonstrate our technique by scanning objects with a wide range of reflectance properties and show accurate renderings of the objects under novel illumination conditions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kallmann, Marcelo
Scalable Solutions for Interactive Virtual Humans that can Manipulate Objects Proceedings Article
In: First Annual Artificial Intelligence and Interactive Entertainment Conference, Marina del Rey, CA, 2005.
@inproceedings{kallmann_scalable_2005,
  author    = {Marcelo Kallmann},
  title     = {Scalable Solutions for Interactive Virtual Humans that can Manipulate Objects},
  booktitle = {First Annual Artificial Intelligence and Interactive Entertainment Conference},
  address   = {Marina del Rey, CA},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Scalable%20Solutions%20for%20Interactive%20Virtual%20Humans%20that%20can%20Manipulate%20Objects.pdf},
  abstract  = {This paper presents scalable solutions for achieving virtual humans able to manipulate objects in interactive virtual environments. The scalability trades computational time with the ability of addressing increasingly difficult constraints. In time-critical environments, arm motions are computed in few milliseconds using fast analytical Inverse Kinematics. For other types of applications where collision-free motions are required, a randomized motion planner capable of generating motions of average complexity in about a second of computation time is employed. The steps required for defining and computing different types of manipulations are described in this paper.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kock, Arien; Gratch, Jonathan
An Evaluation of Automatic Lip-syncing Methods for Game Environments Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2005, 2005.
@techreport{kock_evaluation_2005,
  author      = {Arien Kock and Jonathan Gratch},
  title       = {An Evaluation of Automatic Lip-syncing Methods for Game Environments},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 01 2005},
  year        = {2005},
  date        = {2005-01-01},
  url         = {http://ict.usc.edu/pubs/ICT-TR.01.2005.pdf},
  abstract    = {Lip-synching is the production of articulator motion corresponding to a given audible utterance. The Mission Rehearsal Exercise training system requires lip-synching to increase the believability of its virtual agents. In this report I document the selection, exploration, evaluation and comparison of several candidate lip-synching systems, ending with a recommendation. The evaluation focuses on the believability of articulators' expression, the foreseeable difficulty of integration into MRE’s architecture, the support for facial expressions related to semantics and prosodic features as well as the scalability of each system.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Pighin, Frédéric; Patel, Sanjit; Cohen, Jonathan; Chu, Anson
Oriented Particle Level Set for Fluid Simulation Book
2005.
@book{pighin_oriented_2005,
  author    = {Frédéric Pighin and Sanjit Patel and Jonathan Cohen and Anson Chu},
  title     = {Oriented Particle Level Set for Fluid Simulation},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Oriented%20Particle%20Level%20Set%20for%20Fluid%20Simulation.pdf},
  abstract  = {The particle level set technique has been adopted in computer graphics as the method of choice for tracking the surface of simulated liquids. In this poster, we describe a novel technique for modeling such an interface. Our technique is based on a set of oriented particles that provides a piecewise linear approximation to the interface. Using this improved model, we obtain a more accurate representation of the water surface and reduced mass loss during simulation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
Controlling the Focus of Perceptual Attention in Embodied Conversational Agents Proceedings Article
In: Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems, 2005, ISBN: 1-59593-093-0.
@inproceedings{kim_controlling_2005,
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  title     = {Controlling the Focus of Perceptual Attention in Embodied Conversational Agents},
  booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems},
  doi       = {10.1145/1082473.1082641},
  isbn      = {1-59593-093-0},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Controlling%20the%20Focus%20of%20Perceptual%20Attention%20in%20Embodied%20Conversational%20Agents.pdf},
  abstract  = {In this paper, we present a computational model of dynamic perceptual attention for virtual humans. The computational models of perceptual attention that we surveyed fell into one of two camps: top-down and bottom-up. Biologically inspired computational models [2] typically focus on the bottom-up aspects of attention, while most virtual humans [1,3,7] implement a top-down form of attention. Bottom-up attention models only consider the sensory information without taking into consideration the saliency based on tasks or goals. As a result, the outcome of a purely bottom-up model will not consistently match the behavior of real humans in certain situations. Modeling perceptual attention as a purely top-down process, however, is also not sufficient for implementing a virtual human. A purely top-down model does not take into account the fact that virtual humans need to react to perceptual stimuli vying for attention. Top-down systems typically handle this in an ad hoc manner by encoding special rules to catch certain conditions in the environment. The problem with this approach is that it does not provide a principled way of integrating the ever-present bottom-up perceptual stimuli with top-down control of attention. This model extends the prior model [7] with perceptual resolution based on psychological theories of human perception [4]. This model allows virtual humans to dynamically interact with objects and other individuals, balancing the demands of goal-directed behavior with those of attending to novel stimuli. This model has been implemented and tested with the MRE Project [5].},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
The Fictionalization of Lessons Learned Journal Article
In: IEEE Multimedia, vol. 12, no. 4, pp. 12–14, 2005.
@article{gordon_fictionalization_2005,
  title     = {The Fictionalization of Lessons Learned},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/The%20Fictionalization%20of%20Lessons%20Learned.pdf},
  year      = {2005},
  date      = {2005-01-01},
  journal   = {IEEE Multimedia},
  volume    = {12},
  number    = {4},
  pages     = {12--14},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Responsive Behavior of a Listening Agent Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2005, 2005.
@techreport{maatman_responsive_2005,
  author      = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
  title       = {Responsive Behavior of a Listening Agent},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 02 2005},
  year        = {2005},
  date        = {2005-01-01},
  url         = {http://ict.usc.edu/pubs/ICT-TR.02.2005.pdf},
  abstract    = {The purpose of this assignment is twofold. First the possibility of generating real time responsive behavior is evaluated in order to create a more human-like agent. Second, the effect of the behavior of the agent on the human interactor is evaluated. The main motivation for the focus on responsive gestures is because much research has been done already on gestures that accompany the speaker, and nothing on gesture that accompany the listener, although responsiveness is a crucial part of a conversation. The responsive behavior of a virtual agent consists of performing gestures during the time a human is speaking to the agent. To generate the correct gestures, first a literature research is carried out, from which is concluded that with the current of the current Natural Language Understanding technology, it is not possible to extract semantic features of the human speech in real time. Thus, other features have to be considered. The result of the literature research is a basic mapping between real time obtainable features and their correct responsive behavior: - if the speech contains a relatively long period of low pitch then perform a head nod. - if the speech contains relatively high intensity then perform a head nod - if the speech contains disfluency then perform a posture shift, gazing behavior or a frown - if the human performs a posture shift then mirror this posture shift - if the human performs a head shake then mirror this head shake - if the human performs major gazing behavior then mimic this behavior A design has been made to implement this mapping into the behavior of a virtual agent and this design has been implemented which results in two programs. One to mirror the physical features of the human and one to extract the speech features from the voice of the human. The two programs are combined and the effect of the resulting behavior on the human interactor has been tested.
The results of these tests are that the performing of responsive behavior has a positive effect on the natural behavior of a virtual agent and thus looks promising for future research. However, the gestures proposed by this mapping are not always context-independent. Thus, much refinement is still to be done and more functionality can be added to improve the responsive behavior. The conclusion of this research is twofold. First the performing of responsive behaviors in real time is possible with the presented mapping and this results in a more natural behaving agent. Second, some responsive behavior is still dependant of semantic information. This leaves open the further enhancement of the presented mapping in order to increase the responsive behavior.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Alpaslan, Z. Y.; Yeh, S. -C.; Rizzo, Albert; Sawchuk, Alexander A.
Quantitative Comparison of Interaction with Shutter Glasses and Autostereoscopic Displays Proceedings Article
In: Stereoscopic Displays and Virtual Reality Systems XII Symposium, San Jose, CA, 2005.
@inproceedings{alpaslan_quantitative_2005,
  author    = {Z. Y. Alpaslan and S. -C. Yeh and Albert Rizzo and Alexander A. Sawchuk},
  title     = {Quantitative Comparison of Interaction with Shutter Glasses and Autostereoscopic Displays},
  booktitle = {Stereoscopic Displays and Virtual Reality Systems XII Symposium},
  address   = {San Jose, CA},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Quantitative%20Comparison%20of%20Interaction%20with%20Shutter%20Glasses%20and%20Autostereoscopic%20Displays.pdf},
  abstract  = {In this paper we describe experimental measurements and comparison of human interaction with three different types of stereo computer displays. We compare traditional shutter glasses-based viewing with three-dimensional (3D) autostereoscopic viewing on displays such as the Sharp LL-151-3D display and StereoGraphics SG 202 display. The method of interaction is a sphere-shaped "cyberprop" containing an Ascension Flock-of-Birds tracker that allows a user to manipulate objects by imparting the motion of the sphere to the virtual object. The tracking data is processed with OpenGL to manipulate objects in virtual 3D space, from which we synthesize two or more images as seen by virtual cameras observing them. We concentrate on the quantitative measurement and analysis of human performance for interactive object selection and manipulation tasks using standardized and scalable configurations of 3D block objects. The experiments use a series of progressively more complex block configurations that are rendered in stereo on various 3D displays. In general, performing the tasks using shutter glasses required less time as compared to using the autostereoscopic displays. While both male and female subjects performed almost equally fast with shutter glasses, male subjects performed better with the LL-151-3D display, while female subjects performed better with the SG202 display. Interestingly, users generally had a slightly higher efficiency in completing a task set using the two autostereoscopic displays as compared to the shutter glasses, although the differences for all users among the displays was relatively small. There was a preference for shutter glasses compared to autostereoscopic displays in the ease of performing tasks, and glasses were slightly preferred for overall image quality and stereo image quality. However, there was little difference in display preference in physical comfort and overall preference.
We present some possible explanations of these results and point out the importance of the autostereoscopic "sweet spot" in relation to the user's head and body position.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Mao, Wenji; Gratch, Jonathan; Marsella, Stacy C.
Mitigation Theory: An Integrated Approach Proceedings Article
In: Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci), Stresa, Italy, 2005.
@inproceedings{martinovski_mitigation_2005,
  author    = {Bilyana Martinovski and Wenji Mao and Jonathan Gratch and Stacy C. Marsella},
  title     = {Mitigation Theory: An Integrated Approach},
  booktitle = {Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci)},
  address   = {Stresa, Italy},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Mitigation%20Theory-%20An%20Integrated%20Approach.pdf},
  abstract  = {The purpose of this paper is to develop a theoretical model of mitigation by integrating cognitive and discourse approaches to appraisal and coping. Mitigation involves strategic, emotional, linguistic, and Theory of Mind processes on different levels of consciousness. We emphasize that discourse analysis can assist our understanding of these processes.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating Social Causality and Responsibility Models: An Initial Report Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2005, 2005.
@techreport{mao_evaluating_2005,
  title       = {Evaluating Social Causality and Responsibility Models: An Initial Report},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT-TR-03-2005.pdf},
  year        = {2005},
  date        = {2005-01-01},
  number      = {ICT TR 03 2005},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich believability and cognitive capabilities of social intelligent agents. In this report, we present a general computational model of social causality and responsibility, and empirical results of a preliminary evaluation of the model in comparison with several other approaches.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Natural Behavior of a Listening Agent Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA), pp. 25–36, Kos, Greece, 2005.
@inproceedings{maatman_natural_2005,
  title     = {Natural Behavior of a Listening Agent},
  author    = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Natural%20Behavior%20of%20a%20Listening%20Agent.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA)},
  pages     = {25--36},
  address   = {Kos, Greece},
  abstract  = {In contrast to the variety of listening behaviors produced in human-to-human interaction, most virtual agents sit or stand passively when a user speaks. This is a reflection of the fact that although the correct responsive behavior of a listener during a conversation is often related to the semantics, the state of current speech understanding technology is such that semantic information is unavailable until after an utterance is complete. This paper will illustrate that appropriate listening behavior can also be generated by other features of a speaker's behavior that are available in real time such as speech quality, posture shifts and head movements. This paper presents a mapping from these real-time obtainable features of a human speaker to agent listening behaviors.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; McNerney, Peter J.; Eastlund, Ernie; Manson, Brian; Gratch, Jonathan; Hill, Randall W.; Swartout, William
Development of a VR Therapy Application for Iraq War Military Personnel with PTSD Book Section
In: Studies in Health Technology and Informatics, vol. 111, no. 13, pp. 407–413, 13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA, 2005.
@incollection{rizzo_development_2005-1,
  title     = {Development of a VR Therapy Application for Iraq War Military Personnel with PTSD},
  author    = {Albert Rizzo and Jarrell Pair and Peter J. McNerney and Ernie Eastlund and Brian Manson and Jonathan Gratch and Randall W. Hill and William Swartout},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20VR%20Therapy%20Application%20for%20Iraq%20War%20Veterans%20with%20PTSD.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Studies in Health Technology and Informatics},
  volume    = {111},
  number    = {13},
  pages     = {407--413},
  address   = {13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA},
  series    = {Medicine Meets Virtual Reality},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 returning Iraq War military personnel are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure therapy has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of an Iraq War PTSD VR application that is being created from the virtual assets that were initially developed for the X-Box game entitled Full Spectrum Warrior which was inspired by a combat tactical training simulation, Full Spectrum Command.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Rizzo, Albert; Morie, Jacquelyn; Williams, Josh; Pair, Jarrell; Buckwalter, John Galen
Human Emotional State and its Relevance for Military VR Training Proceedings Article
In: Proceedings of the 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
@inproceedings{rizzo_human_2005,
  author    = {Albert Rizzo and Jacquelyn Morie and Josh Williams and Jarrell Pair and John Galen Buckwalter},
  title     = {Human Emotional State and its Relevance for Military VR Training},
  booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction},
  address   = {Las Vegas, NV},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Human%20Emotional%20State%20and%20its%20Relevance%20for%20Military%20VR%20Training.pdf},
  abstract  = {Combat environments by their nature can produce a dramatic range of emotional responses in military personnel. When immersed in the emotional "fog of war," the potential exists for optimal human decision-making and performance of goal-directed activities to be seriously compromised. This may be especially true when combat training is conducted under conditions that lack emotional engagement by the soldier. Real world military training often naturally includes stress induction that aims to promote a similarity of internal emotional stimulus cues with what is expected to be present on the battlefield. This approach to facilitating optimal training effectiveness is supported by a long history of learning theory research. Current Virtual Reality military training approaches are noteworthy in their emphasis on creating hi-fidelity graphic and audio realism with the aim to foster better transfer of training. However, less emphasis is typically placed on the creation of emotionally evocative virtual training scenarios that can induce emotional stress in a manner similar to what is typically experienced under real world training conditions. As well, emotional issues in the post-combat aftermath need to be addressed, as can be seen in the devastating emotional difficulties that occur in some military personnel following combat. This is evidenced by the number of recent medical reports that suggest the incidence of "Vietnam-levels" of combat-related Post Traumatic Stress Disorder symptomatology in returning military personnel from the Iraq conflict. In view of these issues, the USC Institute for Creative Technologies (ICT) has initiated a research program to study emotional issues that are relevant to VR military applications. This paper will present the rationale and status of two ongoing VR research programs at the ICT that address sharply contrasting ends of the emotional spectrum relevant to the military: 1.
The Sensory Environments Evaluation (SEE) Project is examining basic factors that underlie emotion as it occurs within VR training environments and how this could impact transfer of training, and 2. The Full Spectrum Warrior (FSW) Post Traumatic Stress Disorder Project which is currently in the process of converting the existing FSW combat tactical simulation training scenario (and X-Box game) into a VR treatment system for the conduct of graduated exposure therapy in Iraq war military personnel with Post Traumatic Stress Disorder.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
Acquisition of Time-Varying Participating Media Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2005.
@inproceedings{hawkins_acquisition_2005,
  author    = {Tim Hawkins and Per Einarsson and Paul Debevec},
  title     = {Acquisition of Time-Varying Participating Media},
  booktitle = {SIGGRAPH},
  address   = {Los Angeles, CA},
  year      = {2005},
  date      = {2005-01-01},
  url       = {http://ict.usc.edu/pubs/Acquisition%20of%20Time-Varying%20Participating%20Media.pdf},
  abstract  = {We present a technique for capturing time-varying volumetric data of participating media. A laser sheet is swept repeatedly through the volume, and the scattered light is imaged using a high-speed camera. Each sweep of the laser provides a near-simultaneous volume of density values. We demonstrate rendered animations under changing viewpoint and illumination, making use of measured values for the scattering phase function and albedo.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Klimchuck, Dean; Mitura, Roman; Bowerly, Todd; Buckwalter, John Galen; Kerns, Kim; Randall, Karherine; Adams, Rebecca; Finn, Paul; Tarnanas, Ioannis; Sirbu, Cristian; Ollendick, Thomas H.; Yeh, Shih-Ching
A Virtual Reality Scenario for All Seasons: The Virtual Classroom Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
@inproceedings{rizzo_virtual_2005,
  title     = {A Virtual Reality Scenario for All Seasons: The Virtual Classroom},
  author    = {Albert Rizzo and Dean Klimchuck and Roman Mitura and Todd Bowerly and John Galen Buckwalter and Kim Kerns and Karherine Randall and Rebecca Adams and Paul Finn and Ioannis Tarnanas and Cristian Sirbu and Thomas H. Ollendick and Shih-Ching Yeh},
  url       = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Scenario%20for%20All%20Seasons-%20The%20Virtual%20Classroom%20(HCI).pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
  address   = {Las Vegas, NV},
  abstract  = {Rather than relying on costly physical mock-ups of functional assessment and rehabilitation environments, VR offers the option to produce and distribute identical "standard" environments. Within such digital assessment and rehabilitation scenarios, normative data can be accumulated for performance comparisons needed for assessment, diagnosis and for training purposes. As well, in this manner, reusable archetypic virtual environments constructed for one purpose, could also be applied for clinical applications addressing other purposes. This has now been done with the Virtual Classroom scenario. While originally developed as a controlled stimulus environment in which attention processes could be systematically assessed in children while in the presence of varying levels of distraction, the system is now finding use for other clinical targets. Such applications that are being developed and tested using the Virtual Classroom for other purposes include: 1. Expansion of the range of attention assessment tests (i.e., a `Stroop' Interference testing system for all ages). 2. A wide field of view system to study eye tracking under distracting conditions with ADHD children using an Elumens VisionStation®. 3. Development of the Virtual Classroom as a tool for anxiety assessment and graduated exposure therapy for children with Social Anxiety Disorder. 4. An extension to the class to include a maze of halls leading out of the school for an earthquake safety training application with persons with developmental and learning disabilities.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Core, Mark; Lane, H. Chad; Lent, Michael; Solomon, Steve; Gomboc, Dave; Carpenter, Paul
Toward Question Answering for Simulations Proceedings Article
In: International Joint Conference on Artificial Intelligence (IJCAI) Workshop on Knowledge and Reasoning for Answering Questions, Edinburgh, Scotland, 2005.
@inproceedings{core_toward_2005,
  title     = {Toward Question Answering for Simulations},
  author    = {Mark Core and H. Chad Lane and Michael Lent and Steve Solomon and Dave Gomboc and Paul Carpenter},
  url       = {http://ict.usc.edu/pubs/Toward%20Question%20Answering%20for%20Simulations.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {International Joint Conference on Artificial Intelligence (IJCAI) Workshop on Knowledge and Reasoning for Answering Questions},
  address   = {Edinburgh, Scotland},
  abstract  = {The new research area of explainable artificial intelligence (XAI) allows users to question simulated entities whose motivations would otherwise be hidden. Here, we focus on the knowledge representation issues involved in building such systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Mao, Wenji; Gratch, Jonathan
Social Causality and Responsibility: Modeling and Evaluation Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA), pp. 191–204, Kos, Greece, 2005.
@inproceedings{mao_social_2005,
  title     = {Social Causality and Responsibility: Modeling and Evaluation},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Social%20Causality%20and%20Responsibility-%20Modeling%20and%20Evaluation.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA)},
  pages     = {191--204},
  address   = {Kos, Greece},
  abstract  = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich the believability and the cognitive capabilities of social intelligent agents. In this paper, we present a general computational model of social causality and responsibility, and empirically evaluate and compare the model with several other approaches.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
@inproceedings{jan_dialog_2005,
  title     = {Dialog Simulation for Background Characters},
  author    = {Dusan Jan and David Traum},
  url       = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters, however we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Gordon, Andrew S.; Kim, Julia
Learning the Lessons of Leadership Experience: Tools for Interactive Case Method Analysis Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
@inproceedings{hill_learning_2004,
  author    = {Randall W. Hill and Andrew S. Gordon and Julia Kim},
  title     = {Learning the Lessons of Leadership Experience: Tools for Interactive Case Method Analysis},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  year      = {2004},
  date      = {2004-12-01},
  url       = {http://ict.usc.edu/pubs/LEARNING%20THE%20LESSONS%20OF%20LEADERSHIP%20EXPERIENCE-%20TOOLS%20FOR%20INTERACTIVE%20CASE%20METHOD%20ANALYSIS.pdf},
  abstract  = {The Army Excellence in Leadership (AXL) project at the University of Southern California's Institute for Creative Technologies is aimed at supporting the acquisition of tacit knowledge of military leadership through the development of compelling filmed narratives of leadership scenarios and interactive training technologies. The approach taken in the AXL project is to leverage the best practices of case-method teaching and use Hollywood storytelling techniques to create fictional case studies (as filmed media) addressing specific leadership issues. In addition to authoring compelling cases for analysis, we have developed software prototypes that instantiate the case-method teaching approach. These systems engage individual trainees in human-computer dialogues that are focused on the leadership issues that have been embedded in the fictional cases.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Korris, James H.
Full Spectrum Warrior: How the Institute for Creative Technologies Built a Cognitive Training Tool for the XBox Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
@inproceedings{korris_full_2004,
  title     = {Full Spectrum Warrior: How the Institute for Creative Technologies Built a Cognitive Training Tool for the XBox},
  author    = {James H. Korris},
  url       = {http://ict.usc.edu/pubs/FULL%20SPECTRUM%20WARRIOR-%20HOW%20THE%20INSTITUTE%20FOR%20CREATIVE%20TECHNOLOGIES%20BUILT%20A%20COGNITIVE%20TRAINING%20TOOL%20FOR%20THE%20XBOX.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Microsoft's popular game console, the Xbox, combined the possibility of compelling training efficiencies with formidable obstacles to development, both in terms of the business model, the limitation of the Windows 2000 computer inside it and the system's standard human-machine interface. In its mission to leverage the capabilities of the entertainment industry to develop next-generation simulation tools, the Institute for Creative Technologies turned to this inexpensive, powerful platform for its Squad level cognitive tactical trainer. This paper will describe the pedagogical and technological challenges and unique processes that translated Squad level command doctrine to a commercial game interface and a cost-effective, universally-accessible computational medium.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.