Publications
Search
Jurik, Joel; Burnett, Thomas; Klug, Michael; Debevec, Paul
Geometry-Corrected Light Field Rendering for Creating a Holographic Stereogram Proceedings Article
In: CVPR Workshop for Computational Cameras and Displays, Providence, RI, 2012.
@inproceedings{jurik_geometry-corrected_2012,
  title     = {Geometry-Corrected Light Field Rendering for Creating a Holographic Stereogram},
  author    = {Joel Jurik and Thomas Burnett and Michael Klug and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Geometry-Corrected%20Light%20Field%20Rendering%20for%20Creating%20a%20Holographic%20Stereogram.pdf},
  year      = {2012},
  date      = {2012-06-01},
  booktitle = {CVPR Workshop for Computational Cameras and Displays},
  address   = {Providence, RI},
  abstract  = {We present a technique to record and process a light field of an object in order to produce a printed holographic stereogram. We use a geometry correction process to maximize the depth of field and depth-dependent surface detail even when the array of viewpoints comprising the light field is coarsely sampled with respect to the angular resolution of the printed hologram. We capture the light field data of an object with a digital still camera attached to a 2D translation stage, and generate hogels (holographic elements) for printing by reprojecting the light field onto a photogrammetrically recovered model of the object and querying the relevant rays to be produced by the hologram with respect to this geometry. This results in a significantly clearer image of detail at different depths in the printed holographic stereogram.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ma, Wan-Chun; Wang, Yi-Hua; Fyffe, Graham; Chen, Bing-Yu; Debevec, Paul
A blendshape model that incorporates physical interaction Journal Article
In: Computer Animation and Virtual Worlds, vol. 23, no. 3-4, pp. 235–243, 2012.
@article{ma_blendshape_2012,
  title     = {A blendshape model that incorporates physical interaction},
  author    = {Wan-Chun Ma and Yi-Hua Wang and Graham Fyffe and Bing-Yu Chen and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20blendshape%20model%20that%20incorporates%20physical%20interaction-2.pdf},
  doi       = {10.1002/cav.1441},
  year      = {2012},
  date      = {2012-05-01},
  journal   = {Computer Animation and Virtual Worlds},
  volume    = {23},
  number    = {3-4},
  pages     = {235--243},
  abstract  = {The linear blendshape technique has been intensively used for computer animation and games because of its simplicity and effectiveness. However, it cannot describe rotational deformations and deformations because of self collision or scene interaction. In this paper, we present a new technique to address these two major limitations by introducing physical-based simulation to blendshapes. The proposed technique begins by constructing a mass–spring system for each blendshape target. Each system is initialized in its steady state by setting the rest length of each spring as the edge length of the corresponding target. To begin shape interpolation, we linearly interpolate the rest lengths of the springs according to a given interpolation factor α ∈ [0,1]. The interpolated shape is then generated by computing the equilibrium of the mass–spring system with the interpolated rest lengths. Results from our technique show physically plausible deformations even in the case of large rotations between blendshape targets. In addition, the new blendshape model is able to interact with other scene elements by introducing collision detection and handling to the mass–spring system.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article},
}
Guarnera, Giuseppe Claudio; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Surface Normals from Spherical Stokes Reflectance Fields Proceedings Article
In: ECCV Workshop on Color and Photometry in Computer Vision (CPCV), Firenze, Italy, 2012.
@inproceedings{guarnera_estimating_2012,
  title     = {Estimating Surface Normals from Spherical {Stokes} Reflectance Fields},
  author    = {Giuseppe Claudio Guarnera and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
  url       = {http://ict.usc.edu/pubs/Estimating%20Surface%20Normals%20from%20Spherical%20Stokes%20Reflectance%20Fields.pdf},
  year      = {2012},
  date      = {2012-03-01},
  booktitle = {ECCV Workshop on Color and Photometry in Computer Vision (CPCV)},
  address   = {Firenze, Italy},
  abstract  = {In this paper we introduce a novel technique for estimating surface normals from the four Stokes polarization parameters of specularly reflected light under a single spherical incident lighting condition that is either unpolarized or circularly polarized. We illustrate the practicality of our technique by estimating surface normals under uncontrolled outdoor illumination from just four observations from a fixed viewpoint.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Fyffe, Graham
High Fidelity Facial Hair Capture Proceedings Article
In: SIGGRAPH, Playa Vista, CA, 2012.
@inproceedings{fyffe_high_2012-1,
  title       = {High Fidelity Facial Hair Capture},
  author      = {Graham Fyffe},
  url         = {http://ict.usc.edu/pubs/High%20Fidelity%20Facial%20Hair%20Capture.pdf},
  year        = {2012},
  date        = {2012-01-01},
  booktitle   = {SIGGRAPH},
  number      = {ICT TR 02 2012},
  address     = {Playa Vista, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {Modeling human hair from photographs is a topic of ongoing interest to the graphics community. Yet, the literature is predominantly concerned with the hair volume on the scalp, and it remains difficult to capture digital characters with interesting facial hair. Recent stereo-vision-based facial capture systems (e.g. [Furukawa and Ponce 2010][Beeler et al. 2010]) are capable of capturing extremely fine facial detail from high resolution photographs, but any facial hair present on the subject is reconstructed as a blobby mass. Prior work in facial hair photo-modeling is based on learned priors and image cues [Herrera et al. ], and does not reconstruct the individual hairs belonging uniquely to the subject. We propose a method for capturing the three dimensional shape of complex, multi-colored facial hair from a small number of photographs taken simultaneously under uniform illumination. The method produces a set of oriented hair particles, suitable for point-based rendering.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {inproceedings},
}
Ma, Wan-Chun; Wang, Yi-Hua; Fyffe, Graham; Barbič, Jernej; Chen, Bing-Yu; Debevec, Paul
A blendshape model that incorporates physical interaction Proceedings Article
In: SIGGRAPH Asia, Hong Kong, 2011, ISBN: 978-1-4503-1137-3.
@inproceedings{ma_blendshape_2011,
  title     = {A blendshape model that incorporates physical interaction},
  author    = {Wan-Chun Ma and Yi-Hua Wang and Graham Fyffe and Jernej Barbi{\v c} and Bing-Yu Chen and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20blendshape%20model%20that%20incorporates%20physical%20interaction.pdf},
  doi       = {10.1145/2073304.2073343},
  isbn      = {978-1-4503-1137-3},
  year      = {2011},
  date      = {2011-12-01},
  booktitle = {SIGGRAPH Asia},
  address   = {Hong Kong},
  abstract  = {We present a new technique for physically-plausible shape blending by interpolating the spring rest length parameters of a mass-spring system. This blendshape method begins by constructing two consistent mass-spring systems (i.e., with vertex-wise correspondence and the same topology) for source and target shapes, respectively, and setting the two systems as in their static states. In other words, their edge lengths equal to the rest lengths of the springs. To create an intermediate pose, we generate a new mass-spring system consistent with the source and target ones and set its rest lengths as linearly interpolated between source and target based on an interpolation factor α ∈ [0, 1]. The new pose is then synthesized by computing the equilibrium given the interpolated rest lengths. In addition, the mass-spring system may interact with other objects in the environment by incorporating collision detection.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ghosh, Abhijeet; Fyffe, Graham; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Debevec, Paul
Multiview Face Capture using Polarized Spherical Gradient Illumination Proceedings Article
In: Proceedings of SIGGRAPH Asia 2011/ACM Trans. on Graphics, 2011.
@inproceedings{ghosh_multiview_2011,
  title     = {Multiview Face Capture using Polarized Spherical Gradient Illumination},
  author    = {Abhijeet Ghosh and Graham Fyffe and Borom Tunwattanapong and Jay Busch and Xueming Yu and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Multiview%20Face%20Capture%20using%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
  year      = {2011},
  date      = {2011-12-01},
  booktitle = {Proceedings of SIGGRAPH Asia 2011/ACM Trans. on Graphics},
  volume    = {30},
  number    = {6},
  abstract  = {We present a novel process for acquiring detailed facial geometry with high resolution diffuse and specular photometric information from multiple viewpoints using polarized spherical gradient illumination. Key to our method is a new pair of linearly polarized lighting patterns which enables multiview diffuse-specular separation under a given spherical illumination condition from just two photographs. The patterns – one following lines of latitude and one following lines of longitude – allow the use of fixed linear polarizers in front of the cameras, enabling more efficient acquisition of diffuse and specular albedo and normal maps from multiple viewpoints. In a second step, we employ these albedo and normal maps as input to a novel multi-resolution adaptive domain message passing stereo reconstruction algorithm to create high resolution facial geometry. To do this, we formulate the stereo reconstruction from multiple cameras in a commonly parameterized domain for multiview reconstruction. We show competitive results consisting of high-resolution facial geometry with relightable reflectance maps using five DSLR cameras. Our technique scales well for multiview acquisition without requiring specialized camera systems for sensing multiple polarization states.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Jones, Andrew; Fyffe, Graham; Yu, Xueming; Ma, Wan-Chun; Busch, Jay; Ichikari, Ryosuke; Bolas, Mark; Debevec, Paul
Head-mounted Photometric Stereo for Performance Capture Proceedings Article
In: 8th European Conference on Visual Media Production (CVMP 2011), London, UK, 2011.
@inproceedings{jones_head-mounted_2011,
  title     = {Head-mounted Photometric Stereo for Performance Capture},
  author    = {Andrew Jones and Graham Fyffe and Xueming Yu and Wan-Chun Ma and Jay Busch and Ryosuke Ichikari and Mark Bolas and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Head-mounted%20Photometric%20Stereo%20for%20Performance%20Capture.pdf},
  year      = {2011},
  date      = {2011-11-01},
  booktitle = {8th European Conference on Visual Media Production (CVMP 2011)},
  address   = {London, UK},
  abstract  = {Head-mounted cameras are an increasingly important tool for capturing facial performances to drive virtual characters. They provide a fixed, unoccluded view of the face, useful for observing motion capture dots or as input to video analysis. However, the 2D imagery captured with these systems is typically affected by ambient light and generally fails to record subtle 3D shape changes as the face performs. We have developed a system that augments a head-mounted camera with LED-based photometric stereo. The system allows observation of the face independent of the ambient light and generates per-pixel surface normals so that the performance is recorded dynamically in 3D. The resulting data can be used for facial relighting or as better input to machine learning algorithms for driving an animated face.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Practical Image-Based Relighting and Editing with Spherical-Harmonics and Local Lights Proceedings Article
In: European Conference on Visual Media and Production (CVMP), 2011.
@inproceedings{tunwattanapong_practical_2011,
  title     = {Practical Image-Based Relighting and Editing with Spherical-Harmonics and Local Lights},
  author    = {Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Practical%20Image%20Based%20Relighting%20and%20Editing%20with%20Spherical%20Harmonics%20and%20Local%20Lights.pdf},
  year      = {2011},
  date      = {2011-11-01},
  booktitle = {European Conference on Visual Media and Production (CVMP)},
  abstract  = {We present a practical technique for image-based relighting under environmental illumination which greatly reduces the number of required photographs compared to traditional techniques, while still achieving high quality editable relighting results. The proposed method employs an optimization procedure to combine spherical harmonics, a global lighting basis, with a set of local lights. Our choice of lighting basis captures both low and high frequency components of typical surface reflectance functions while generating close approximations to the ground truth with an order of magnitude less data. This technique benefits the acquisition process by reducing the number of required photographs, while simplifying the modification of reflectance data and enabling artistic lighting edits for post-production effects. Here, we demonstrate two desirable lighting edits, modifying light intensity and angular width, employing the proposed lighting basis.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ma, Wan-Chun; Fyffe, Graham; Debevec, Paul
Optimized Local Blendshape Mapping for Facial Motion Retargeting Proceedings Article
In: SIGGRAPH 2011, Vancouver, Canada, 2011.
@inproceedings{ma_optimized_2011,
  title     = {Optimized Local Blendshape Mapping for Facial Motion Retargeting},
  author    = {Wan-Chun Ma and Graham Fyffe and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Optimized%20Local%20Blendshape%20Mapping%20for%20Facial%20Motion%20Retargeting.pdf},
  year      = {2011},
  date      = {2011-08-01},
  booktitle = {SIGGRAPH 2011},
  address   = {Vancouver, Canada},
  abstract  = {One of the popular methods for facial motion retargeting is local blendshape mapping [Pighin and Lewis 2006], where each local facial region is controlled by a tracked feature (for example, a vertex in motion capture data). To map a target motion input onto blendshapes, a pose set is chosen for each facial region with minimal retargeting error. However, since the best pose set for each region is chosen independently, the solution likely has unorganized pose sets across the face regions, as shown in Figure 1(b). Therefore, even though every pose set matches the local features, the retargeting result is not guaranteed to be spatially smooth. In addition, previous methods ignored temporal coherence which is key for jitter-free results.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Wilson, Cyrus A.; Alexander, Oleg; Tunwattanapong, Borom; Peers, Pieter; Ghosh, Abhijeet; Busch, Jay; Hartholt, Arno; Debevec, Paul
Facial Cartography: Interactive Scan Correspondence Proceedings Article
In: ACM/Eurographics Symposium on Computer Animation, 2011.
@inproceedings{wilson_facial_2011,
  title     = {Facial Cartography: Interactive Scan Correspondence},
  author    = {Cyrus A. Wilson and Oleg Alexander and Borom Tunwattanapong and Pieter Peers and Abhijeet Ghosh and Jay Busch and Arno Hartholt and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Facial%20Cartography-%20Interactive%20Scan%20Correspondence.pdf},
  year      = {2011},
  date      = {2011-08-01},
  booktitle = {ACM/Eurographics Symposium on Computer Animation},
  abstract  = {We present a semi-automatic technique for computing surface correspondences between 3D facial scans in different expressions, such that scan data can be mapped into a common domain for facial animation. The technique can accurately correspond high-resolution scans of widely differing expressions – without requiring intermediate pose sequences – such that they can be used, together with reflectance maps, to create high-quality blendshape-based facial animation. We optimize correspondences through a combination of Image, Shape, and Internal forces, as well as Directable forces to allow a user to interactively guide and refine the solution. Key to our method is a novel representation, called an Active Visage, that balances the advantages of both deformable templates and correspondence computation in a 2D canonical domain. We show that our semi-automatic technique achieves more robust results than automated correspondence alone, and is more precise than is practical with unaided manual input.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Fyffe, Graham; Hawkins, Tim; Watts, Chris; Ma, Wan-Chun; Debevec, Paul
Comprehensive Facial Performance Capture Proceedings Article
In: Eurographics 2011, 2011.
@inproceedings{fyffe_comprehensive_2011,
  title     = {Comprehensive Facial Performance Capture},
  author    = {Graham Fyffe and Tim Hawkins and Chris Watts and Wan-Chun Ma and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Comprehensive%20Facial%20Performance%20Capture.pdf},
  year      = {2011},
  date      = {2011-04-01},
  booktitle = {Eurographics 2011},
  abstract  = {We present a system for recording a live dynamic facial performance, capturing highly detailed geometry and spatially varying diffuse and specular reflectance information for each frame of the performance. The result is a reproduction of the performance that can be rendered from novel viewpoints and novel lighting conditions, achieving photorealistic integration into any virtual environment. Dynamic performances are captured directly, without the need for any template geometry or static geometry scans, and processing is completely automatic, requiring no human input or guidance. Our key contributions are a heuristic for estimating facial reflectance information from gradient illumination photographs, and a geometry optimization framework that maximizes a principled likelihood function combining multi-view stereo correspondence and photometric stereo, using multi-resolution belief propagation. The output of our system is a sequence of geometries and reflectance maps, suitable for rendering in off-the-shelf software. We show results from our system rendered under novel viewpoints and lighting conditions, and validate our results by demonstrating a close match to ground truth photographs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Liu, Ko-Yun; Ma, Wan-Chun; Chang, Chun-Fa; Wang, Chuan-Chang; Debevec, Paul
A framework for locally retargeting and rendering facial performance Proceedings Article
In: Computer Animation and Virtual Worlds, pp. 159–167, 2011.
@inproceedings{liu_framework_2011,
  title     = {A framework for locally retargeting and rendering facial performance},
  author    = {Ko-Yun Liu and Wan-Chun Ma and Chun-Fa Chang and Chuan-Chang Wang and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20Framework%20for%20Locally%20Retargeting%20and%20Rendering%20Facial%20Performance.pdf},
  year      = {2011},
  date      = {2011-04-01},
  booktitle = {Computer Animation and Virtual Worlds},
  volume    = {22},
  pages     = {159--167},
  abstract  = {We present a facial motion retargeting method that enables the control of a blendshape rig according to marker-based motion capture data. The main purpose of the proposed technique is to allow a blendshape rig to create facial expressions, which conforms best to the current motion capture input, regardless the underlying blendshape poses. In other words, even though all of the blendshape poses may comprise symmetrical facial expressions only, our method is still able to create asymmetrical expressions without physically splitting any of them into more local blendshape poses. An automatic segmentation technique based on the analysis of facial motion is introduced to create facial regions for local retargeting. We also show that it is possible to blend normal maps for rendering in the same framework. Rendering with the blended normal map significantly improves surface appearance and details.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Stratou, Giota; Ghosh, Abhijeet; Debevec, Paul; Morency, Louis-Philippe
Effect of Illumination on Automatic Expression Recognition: A Novel 3D Relightable Facial Database Proceedings Article
In: Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition, Santa Barbara, CA, 2011.
@inproceedings{stratou_effect_2011,
  title     = {Effect of Illumination on Automatic Expression Recognition: A Novel {3D} Relightable Facial Database},
  author    = {Giota Stratou and Abhijeet Ghosh and Paul Debevec and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Effect%20of%20Illumination%20on%20Automatic%20Expression%20Recognition-%20A%20Novel%203D%20Relightable%20Facial%20Database.pdf},
  year      = {2011},
  date      = {2011-03-01},
  booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition},
  address   = {Santa Barbara, CA},
  abstract  = {One of the main challenges in facial expression recognition is illumination invariance. Our long-term goal is to develop a system for automatic facial expression recognition that is robust to light variations. In this paper, we introduce a novel 3D Relightable Facial Expression (ICT-3DRFE) database that enables experimentation in the fields of both computer graphics and computer vision. The database contains 3D models for 23 subjects and 15 expressions, as well as photometric information that allow for photorealistic rendering. It is also facial action units annotated, using FACS standards. Using the ICT-3DRFE database we create an image set of different expressions/illuminations to study the effect of illumination on automatic expression recognition. We compared the output scores from automatic recognition with expert FACS annotations and found that they agree when the illumination is uniform. Our results show that the output distribution of the automatic recognition can change significantly with light variations and sometimes causes the discrimination of two different expressions to be diminished. We propose a ratio-based light transfer method, to factor out unwanted illuminations from given images and show that it reduces the effect of illumination on expression recognition.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Debevec, Paul; Tchou, Chris; Gardner, Andrew; Hawkins, Tim; Poullis, Charis; Stumpfel, Jessi; Jones, Andrew; Yun, Nathaniel; Einarsson, Per; Lundgren, Therese; Fajardo, Marcos
Digitizing the Parthenon: Estimating Surface Reflectance under Measured Natural Illumination Book Section
In: Gallo, Giovanni (Ed.): Digital Imaging for Cultural Heritage Preservation: Analysis, Restoration, and Reconstruction of Ancient Artworks, pp. 159–182, CRC Press, 2011, ISBN: 978-1-4398-2173-2.
@incollection{debevec_digitizing_2011,
  title     = {Digitizing the {Parthenon}: Estimating Surface Reflectance under Measured Natural Illumination},
  author    = {Paul Debevec and Chris Tchou and Andrew Gardner and Tim Hawkins and Charis Poullis and Jessi Stumpfel and Andrew Jones and Nathaniel Yun and Per Einarsson and Therese Lundgren and Marcos Fajardo},
  editor    = {Giovanni Gallo},
  url       = {http://ict.usc.edu/pubs/Digitizing%20the%20Parthenon-%20Estimating%20Surface%20Reflectance%20under%20Measured%20Natural%20Illumination.pdf},
  isbn      = {978-1-4398-2173-2},
  year      = {2011},
  date      = {2011-01-01},
  booktitle = {Digital Imaging for Cultural Heritage Preservation: Analysis, Restoration, and Reconstruction of Ancient Artworks},
  pages     = {159--182},
  publisher = {CRC Press},
  abstract  = {This edition presents the most prominent topics and applications of digital image processing, analysis, and computer graphics in the field of cultural heritage preservation. The text assumes prior knowledge of digital image processing and computer graphics fundamentals. Each chapter contains a table of contents, illustrations, and figures that elucidate the presented concepts in detail, as well as a chapter summary and a bibliography for further reading. Well-known experts cover a wide range of topics and related applications, including spectral imaging, automated restoration, computational reconstruction, digital reproduction, and 3D models.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection},
}
Jurik, Joel; Jones, Andrew; Bolas, Mark; Debevec, Paul
Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array Proceedings Article
In: IEEE International Workshop on Projector–Camera Systems (PROCAMS), Colorado Springs, CO, 2011.
@inproceedings{jurik_prototyping_2011,
  title     = {Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array},
  author    = {Joel Jurik and Andrew Jones and Mark Bolas and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Prototyping%20a%20Light%20Field%20Display%20Involving%20Direct%20Observation%20of%20a%20Video%20Projector%20Array.pdf},
  year      = {2011},
  date      = {2011-01-01},
  booktitle = {IEEE International Workshop on Projector–Camera Systems (PROCAMS)},
  address   = {Colorado Springs, CO},
  abstract  = {We present a concept for a full-parallax light field display achieved by having users look directly into an array of video projectors. Each projector acts as one angularly varying pixel, so the display's spatial resolution depends on the number of video projectors and the angular resolution depends on the pixel resolution of any one video projector. We prototype a horizontal-parallax-only arrangement by mechanically moving a single pico-projector to an array of positions, and use long-exposure photography to simulate video of a horizontal array. With this setup, we determine the minimal projector density required to produce a continuous image, and describe practical ways to achieve such density and to realize the resulting system. We finally show that if today's pico-projectors become sufficiently inexpensive, immersive full-parallax displays with arbitrarily high spatial and angular resolution will become possible.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Swartout, William; Traum, David; Artstein, Ron; Noren, Dan; Debevec, Paul; Bronnenkant, Kerry; Williams, Josh; Leuski, Anton; Narayanan, Shrikanth; Piepol, Diane; Lane, H. Chad; Morie, Jacquelyn; Aggarwal, Priti; Liewer, Matt; Chiang, Jen-Yuan; Gerten, Jillian; Chu, Selina; White, Kyle
Virtual Museum Guides Demonstration Proceedings Article
In: IEEE Workshop on Spoken Language Technology, Berkeley, CA, 2010.
@inproceedings{swartout_virtual_2010,
  title     = {Virtual Museum Guides Demonstration},
  author    = {William Swartout and David Traum and Ron Artstein and Dan Noren and Paul Debevec and Kerry Bronnenkant and Josh Williams and Anton Leuski and Shrikanth Narayanan and Diane Piepol and H. Chad Lane and Jacquelyn Morie and Priti Aggarwal and Matt Liewer and Jen-Yuan Chiang and Jillian Gerten and Selina Chu and Kyle White},
  url       = {http://ict.usc.edu/pubs/Virtual%20Museum%20Guides%20Demonstration.pdf},
  year      = {2010},
  date      = {2010-12-01},
  booktitle = {IEEE Workshop on Spoken Language Technology},
  address   = {Berkeley, CA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ghosh, Abhijeet; Chen, Tongbo; Peers, Pieter; Wilson, Cyrus A.; Debevec, Paul
Circularly Polarized Spherical Illumination Reflectometry Proceedings Article
In: SIGGRAPH Asia, 2010.
@inproceedings{ghosh_circularly_2010,
  title     = {Circularly Polarized Spherical Illumination Reflectometry},
  author    = {Abhijeet Ghosh and Tongbo Chen and Pieter Peers and Cyrus A. Wilson and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Circularly%20Polarized%20Spherical%20Illumination%20Reflectometry.pdf},
  year      = {2010},
  date      = {2010-12-01},
  booktitle = {SIGGRAPH Asia},
  abstract  = {We present a novel method for surface reflectometry from a few observations of a scene under a single uniform spherical field of circularly polarized illumination. The method is based on a novel analysis of the Stokes reflectance field of circularly polarized spherical illumination and yields per-pixel estimates of diffuse albedo, specular albedo, index of refraction, and specular roughness of isotropic BRDFs. To infer these reflectance parameters, we measure the Stokes parameters of the reflected light at each pixel by taking four photographs of the scene, consisting of three photographs with differently oriented linear polarizers in front of the camera, and one additional photograph with a circular polarizer. The method only assumes knowledge of surface orientation, for which we make a few additional photometric measurements. We verify our method with three different lighting setups, ranging from specialized to off-the-shelf hardware, which project either discrete or continuous fields of spherical illumination. Our technique offers several benefits: it estimates a more detailed model of per-pixel surface reflectance parameters than previous work, it requires a relatively small number of measurements, it is applicable to a wide range of material types, and it is completely viewpoint independent.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Fyffe, Graham; Yu, Xueming; Debevec, Paul
Single-Shot Photometric Stereo by Spectral Multiplexing Proceedings Article
In: SIGGRAPH Asia, Seoul, South Korea, 2010.
@inproceedings{fyffe_single-shot_2010,
  title     = {Single-Shot Photometric Stereo by Spectral Multiplexing},
  author    = {Graham Fyffe and Xueming Yu and Paul Debevec},
  url       = {http://www.ict.usc.edu/pubs/Single-Shot%20Photometric%20Stereo%20by%20Spectral%20Multiplexing.pdf},
  year      = {2010},
  date      = {2010-12-01},
  booktitle = {SIGGRAPH Asia},
  address   = {Seoul, South Korea},
  abstract  = {We propose a novel method for single-shot photometric stereo by spectral multiplexing. The output of our method is a simultaneous per-pixel estimate of the surface normal and full-color reflectance. Our method is well suited to materials with varying color and texture, requires no time-varying illumination, and no high-speed cameras. Being a single-shot method, it may be applied to dynamic scenes without any need for optical flow. Our key contributions are a generalization of three-color photometric stereo to more than three color channels, and the design of a practical six-color-channel system using off-the-shelf parts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Swartout, William; Traum, David; Artstein, Ron; Noren, Dan; Debevec, Paul; Bronnenkant, Kerry; Williams, Josh; Leuski, Anton; Narayanan, Shrikanth; Piepol, Diane; Lane, H. Chad; Morie, Jacquelyn; Aggarwal, Priti; Liewer, Matt; Chiang, Jen-Yuan; Gerten, Jillian; Chu, Selina; White, Kyle
Ada and Grace: Toward Realistic and Engaging Virtual Museum Guides Proceedings Article
In: Proceedings of the 10th International Conference on Intelligent Virtual Agents (IVA 2010), Philadelphia, PA, 2010.
@inproceedings{swartout_ada_2010,
  title     = {{Ada} and {Grace}: Toward Realistic and Engaging Virtual Museum Guides},
  author    = {William Swartout and David Traum and Ron Artstein and Dan Noren and Paul Debevec and Kerry Bronnenkant and Josh Williams and Anton Leuski and Shrikanth Narayanan and Diane Piepol and H. Chad Lane and Jacquelyn Morie and Priti Aggarwal and Matt Liewer and Jen-Yuan Chiang and Jillian Gerten and Selina Chu and Kyle White},
  url       = {http://ict.usc.edu/pubs/ada%20and%20grace.pdf},
  year      = {2010},
  date      = {2010-09-01},
  booktitle = {Proceedings of the 10th International Conference on Intelligent Virtual Agents (IVA 2010)},
  address   = {Philadelphia, PA},
  abstract  = {To increase the interest and engagement of middle school students in science and technology, the InterFaces project has created virtual museum guides that are in use at the Museum of Science, Boston. The characters use natural language interaction and have near photoreal appearance to increase and presents reports from museum staff on visitor reaction},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Wilson, Cyrus A.; Ghosh, Abhijeet; Peers, Pieter; Chiang, Jen-Yuan; Busch, Jay; Debevec, Paul
Temporal Upsampling of Performance Geometry using Photometric Alignment Journal Article
In: ACM Transactions on Graphics, vol. 29, no. 2, 2010.
@article{wilson_temporal_2010,
  title     = {Temporal Upsampling of Performance Geometry using Photometric Alignment},
  author    = {Cyrus A. Wilson and Abhijeet Ghosh and Pieter Peers and Jen-Yuan Chiang and Jay Busch and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Temporal%20Upsampling%20of%20Performance%20Geometry%20Using%20Photometric%20Alignment.pdf},
  year      = {2010},
  date      = {2010-03-01},
  journal   = {ACM Transactions on Graphics},
  volume    = {29},
  number    = {2},
  abstract  = {We present a novel technique for acquiring detailed facial geometry of a dynamic performance using extended spherical gradient illumination. Key to our method is a new algorithm for jointly aligning two photographs – under a gradient illumination condition and its complement – to a full-on tracking frame, providing dense temporal correspondences under changing lighting conditions. We employ a two step algorithm to reconstruct detailed geometry for every captured frame. In the first step, we coalesce information from the gradient illumination frames to the full-on tracking frame, and form a temporally aligned photometric normal map, which is subsequently combined with dense stereo correspondences yielding a detailed geometry. In a second step, we propagate the detailed geometry back to every captured instance guided by the previously computed dense correspondences. We demonstrate reconstructed dynamic facial geometry, captured using moderate to video rates of acquisition, for every captured frame.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article},
}
Filter
Sorry, no publications matched your criteria.