Publications
Search
Pair, Jarrell; Neumann, Ulrich; Piepol, Diane; Swartout, William
FlatWorld: Combining Hollywood Set-Design Techniques with VR Journal Article
In: IEEE Computer Graphics and Applications, no. January/February, 2003.
@article{pair_flatworld_2003,
title = {{FlatWorld}: Combining {Hollywood} Set-Design Techniques with {VR}},
author = {Jarrell Pair and Ulrich Neumann and Diane Piepol and William Swartout},
editor = {Lawrence Rosenblum and Michael Macedonia},
url = {http://ict.usc.edu/pubs/FlatWorld-%20Combining%20Hollywood%20Set-Design%20Techniques%20with%20VR.pdf},
year = {2003},
date = {2003-01-01},
journal = {IEEE Computer Graphics and Applications},
number = {January/February},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from {Lexical Conceptual Structures}},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81--127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Debevec, Paul
A Tutorial on Image-Based Lighting Journal Article
In: IEEE Computer Graphics and Applications, 2002.
@article{debevec_tutorial_2002,
  title     = {A Tutorial on Image-Based Lighting},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Lighting.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Pighin, Frédéric; Szeliski, Richard; Salesin, David H.
Modeling and Animating Realistic Faces from Images Journal Article
In: International Journal on Computer Vision, vol. 50, pp. 143–169, 2002.
@article{pighin_modeling_2002,
title = {Modeling and Animating Realistic Faces from Images},
author = {Frédéric Pighin and Richard Szeliski and David H. Salesin},
url = {http://ict.usc.edu/pubs/Modeling%20and%20Animating%20Realistic%20Faces%20from%20Images.pdf},
year = {2002},
date = {2002-01-01},
journal = {International Journal on Computer Vision},
volume = {50},
pages = {143--169},
abstract = {We present a new set of techniques for modeling and animating realistic faces from photographs and videos. Given a set of face photographs taken simultaneously, our modeling technique allows the interactive recovery of a textured 3D face model. By repeating this process for several facial expressions, we acquire a set of face models that can be linearly combined to express a wide range of expressions. Given a video sequence, this linear face model can be used to estimate the face position, orientation, and facial expression at each frame. We illustrate these techniques on several datasets and demonstrate robust estimations of detailed face geometry and motion.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
@article{rickel_toward_2002,
  title     = {Toward a New Generation of Virtual Humans for Interactive Experiences},
  author    = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Intelligent Systems},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
@article{gratch_creating_2002,
title = {Creating Interactive Virtual Humans: Some Assembly Required},
author = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
url = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
pages = {54--63},
abstract = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew S.
Browsing Image Collections with Representations of Commonsense Activities Journal Article
In: Journal of the American Society for Information Science and Technology, vol. 52, no. 11, pp. 925–929, 2001.
@article{gordon_browsing_2001,
title = {Browsing Image Collections with Representations of Commonsense Activities},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Browsing%20Image%20Collections%20with%20Representations%20of%20Commonsense%20Activities.PDF},
year = {2001},
date = {2001-01-01},
journal = {Journal of the American Society for Information Science and Technology},
volume = {52},
number = {11},
pages = {925--929},
abstract = {To support browsing-based subject access to image collections, it is necessary to provide users with networks of subject terms that are organized in an intuitive, richly interconnected manner. A principled approach to this task is to organize the subject terms by their relationship to activity contexts that are commonly understood among users. This article describes a methodology for creating networks of subject terms by manually representing a large number of common-sense activities that are broadly related to image subject terms. The application of this methodology to the Library of Congress Thesaurus for Graphic Materials produced 768 representations that supported users of a prototype browsing-based retrieval system in searching large, indexed photograph collections.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Moutchtaris, Athanasios; Reveliotis, Panagiotis; Kyriakakis, Chris
Inverse Filter Design for Immersive Audio Rendering Over Loudspeakers Journal Article
In: IEEE Transactions on Multimedia, vol. 2, no. 2, pp. 77–87, 2000.
@article{moutchtaris_inverse_2000,
title = {Inverse Filter Design for Immersive Audio Rendering Over Loudspeakers},
author = {Athanasios Moutchtaris and Panagiotis Reveliotis and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Inverse%20Filter%20Design%20for%20Immersive%20Audio%20Rendering%20Over%20Loudspeakers.pdf},
year = {2000},
date = {2000-06-01},
journal = {IEEE Transactions on Multimedia},
volume = {2},
number = {2},
pages = {77--87},
abstract = {Immersive audio systems can be used to render virtual sound sources in three-dimensional (3-D) space around a listener. This is achieved by simulating the head-related transfer function (HRTF) amplitude and phase characteristics using digital filters. In this paper, we examine certain key signal processing considerations in spatial sound rendering over headphones and loudspeakers. We address the problem of crosstalk inherent in loudspeaker rendering and examine two methods for implementing crosstalk cancellation and loudspeaker frequency response inversion in real time. We demonstrate that it is possible to achieve crosstalk cancellation of 30 dB using both methods, but one of the two (the Fast RLS Transversal Filter Method) offers a significant advantage in terms of computational efficiency. Our analysis is easily extendable to nonsymmetric listening positions and moving listeners.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kyriakakis, Chris; Tsakalides, Panagiotis; Holman, Tomlinson
Surrounded by Sound: Acquisition and Rendering Methods for Immersive Audio Journal Article
In: Signal Processing Magazine, IEEE, vol. 16, no. 1, pp. 55–66, 1999, ISSN: 1053-5888.
@article{kyriakakis_surrounded_1999,
title = {Surrounded by Sound: Acquisition and Rendering Methods for Immersive Audio},
author = {Chris Kyriakakis and Panagiotis Tsakalides and Tomlinson Holman},
url = {http://ict.usc.edu/pubs/Surrounded%20by%20Sound-%20Acquisition%20and%20Rendering%20Methods%20for%20Immersive%20Audio.pdf},
doi = {10.1109/79.743868},
issn = {1053-5888},
year = {1999},
date = {1999-01-01},
journal = {IEEE Signal Processing Magazine},
volume = {16},
number = {1},
pages = {55--66},
abstract = {The authors discuss immersive audio systems and the signal processing issues that pertain to the acquisition and subsequent rendering of 3D sound fields over loudspeakers. On the acquisition side, recent advances in statistical methods for achieving acoustical arrays in audio applications are reviewed. Classical array signal processing addresses two major aspects of spatial filtering, namely localization of a signal of interest, and adaptation of the spatial response of an array of sensors to achieve steering in a given direction. The achieved spatial focusing in the direction of interest makes array signal processing a necessary component in immersive sound acquisition systems. On the rendering side, 3D audio signal processing methods are described that allow rendering of virtual sources around the listener using only two loudspeakers. Finally, the authors discuss the commercial implications of audio DSP.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Traum, David; Andersen, Carl F.; Chong, Waiyian; Josyula, Darsana; Okamoto, Yoshi; Purang, Khemdut; O'Donovan-Anderson, Michael; Perlis, Don
Representations of Dialogue State for Domain and Task Independent Meta-Dialogue Journal Article
In: Electronic Transactions on Artificial Intelligence, vol. 3, pp. 125–152, 1999.
@article{traum_representations_1999,
title = {Representations of Dialogue State for Domain and Task Independent Meta-Dialogue},
author = {David Traum and Carl F. Andersen and Waiyian Chong and Darsana Josyula and Yoshi Okamoto and Khemdut Purang and Michael O'Donovan-Anderson and Don Perlis},
url = {http://ict.usc.edu/pubs/Representations%20of%20Dialogue%20State%20for%20Domain%20and%20Task%20Independent%20Meta-Dialogue.pdf},
year = {1999},
date = {1999-01-01},
journal = {Electronic Transactions on Artificial Intelligence},
volume = {3},
pages = {125--152},
abstract = {We propose a representation of local dialogue context motivated by the need to react appropriately to meta-dialogue, such as various sorts of corrections to the sequence of an instruction and response action. Such contexts includes at least the following aspects: the words and linguistic structures uttered, the domain correlates of those linguistic structures, and plans and actions in response. Each of these is needed as part of the context in order to be able to correctly interpret the range of possible corrections. Partitioning knowledge of dialogue structure in this way may lead to an ability to represent generic dialogue structure (e.g., in the form of axioms), which can be particularized to the domain, topic and content of the dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing Journal Article
In: pp. 9, 0000.
@article{gratch_emotion_nodate,
title = {Emotion recognition {$\neq$} Emotion Understanding: Challenges Confronting the Field of Affective Computing},
author = {Jonathan Gratch},
pages = {9},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
@article{gervits_classication-based_nodate,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 0000.
@article{hartholt_introducing_nodate,
title = {Introducing {RIDE}: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration \& Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
pages = {11},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration \& Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 0000.
@article{hartholt_introducing_nodate-1,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
pages = {11},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
internal-note = {NOTE(review): field-for-field duplicate of entry hartholt_introducing_nodate under a different key — one of the two entries should be removed or the keys merged via biblatex ids},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
@article{hartholt_combat_nodate,
title = {From Combat to {COVID-19} -- Managing the Impact of Trauma Using Virtual Reality},
author = {Arno Hartholt and Sharon Mozgai},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
Sorry, no publications matched your criteria.