Publications
Search
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
Rendering for an Interactive 360 Degree Light Field Display Proceedings Article
In: ACM SIGGRAPH conference proceedings, San Diego, CA, 2007.
@inproceedings{jones_rendering_2007,
  title     = {Rendering for an Interactive 360 Degree Light Field Display},
  author    = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Rendering%20for%20an%20Interactive%20360%20Light%20Field%20Display.pdf},
  year      = {2007},
  date      = {2007-08-01},
  booktitle = {ACM SIGGRAPH conference proceedings},
  address   = {San Diego, CA},
  abstract  = {We describe a set of rendering techniques for an autostereoscopic light field display able to present interactive 3D graphics to multiple simultaneous viewers 360 degrees around the display. The display consists of a high-speed video projector, a spinning mirror covered by a holographic diffuser, and FPGA circuitry to decode specially rendered DVI video signals. The display uses a standard programmable graphics card to render over 5,000 images per second of interactive 3D graphics, projecting 360-degree views with 1.25 degree separation up to 20 updates per second. We describe the system's projection geometry and its calibration process, and we present a multiple-center-of-projection rendering technique for creating perspective-correct images from arbitrary viewpoints around the display. Our projection technique allows correct vertical perspective and parallax to be rendered for any height and distance when these parameters are known, and we demonstrate this effect with interactive raster graphics using a tracking system to measure the viewer's height and distance. We further apply our projection technique to the display of photographed light fields with accurate horizontal and vertical parallax. We conclude with a discussion of the display's visual accommodation performance and discuss techniques for displaying color imagery.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Peers, Pieter; Chabert, Charles-Felix; Weiss, Malte; Debevec, Paul
Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination Proceedings Article
In: Kautz, Jan; Pattanaik, Sumanta (Ed.): Eurographics Symposium on Rendering, 2007.
@inproceedings{ma_rapid_2007,
  title     = {Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination},
  author    = {Wan-Chun Ma and Tim Hawkins and Pieter Peers and Charles-Felix Chabert and Malte Weiss and Paul Debevec},
  editor    = {Jan Kautz and Sumanta Pattanaik},
  url       = {http://ict.usc.edu/pubs/Rapid%20Acquisition%20of%20Specular%20and%20Diffuse%20Normal%20Maps%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
  year      = {2007},
  date      = {2007-01-01},
  booktitle = {Eurographics Symposium on Rendering},
  abstract  = {We estimate surface normal maps of an object from either its diffuse or specular reflectance using four spherical gradient illumination patterns. In contrast to traditional photometric stereo, the spherical patterns allow normals to be estimated simultaneously from any number of viewpoints. We present two polarized lighting techniques that allow the diffuse and specular normal maps of an object to be measured independently. For scattering materials, we show that the specular normal maps yield the best record of detailed surface shape while the diffuse normals deviate from the true surface normal due to subsurface scattering, and that this effect is dependent on wavelength. We show several applications of this acquisition technique. First, we capture normal maps of a facial performance simultaneously from several viewing positions using time-multiplexed illumination. Second, we show that high-resolution normal maps based on the specular component can be used with structured light 3D scanning to quickly acquire high-resolution facial surface geometry using off-the-shelf digital still cameras. Finally, we present a realtime shading model that uses independently estimated normal maps for the specular and diffuse color channels to reproduce some of the perceptually important effects of subsurface scattering.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lamond, Bruce; Peers, Pieter; Debevec, Paul
Fast Image-based Separation of Diffuse and Specular Reflections Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2007, 2007.
@techreport{lamond_fast_2007,
  title       = {Fast Image-based Separation of Diffuse and Specular Reflections},
  author      = {Bruce Lamond and Pieter Peers and Paul Debevec},
  url         = {http://ict.usc.edu/pubs/ICT-TR-02-2007.pdf},
  year        = {2007},
  date        = {2007-01-01},
  number      = {ICT TR 02 2007},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present a novel image-based method for separating diffuse and specular reflections of real objects under distant environmental illumination. By illuminating a scene with only four high frequency illumination patterns, the specular and diffuse reflections can be separated by computing the maximum and minimum observed pixel values. Furthermore, we show that our method can be extended to separate diffuse and specular components under image-based environmental illumination. Applications range from image-based modeling of reflectance properties to improved normal and geometry acquisition.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{debevec_concave_2006,
  title     = {Concave Surround Optics for Rapid Multi-View Imaging},
  author    = {Paul Debevec and Mark Bolas and Ian McDowall},
  url       = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors to simulate the appearance of camera movement around a scene while the physical camera remains stationary. The system thus is amenable to capturing dynamic events avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high speed video of a dynamic scene. We show smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare with alternate setups.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Proceedings Article
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
@inproceedings{jones_simulating_2006,
  title     = {Simulating Spatially Varying Lighting on a Live Performance},
  author    = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
  address   = {London, UK},
  abstract  = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
Virtual Cinematography: Relighting through Computation Journal Article
In: Computer, vol. 39, pp. 57–65, 2006.
@article{debevec_virtual_2006,
  title     = {Virtual Cinematography: Relighting through Computation},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Virtual%20Cinematography-%20Relighting%20through%20Computation.pdf},
  year      = {2006},
  date      = {2006-08-01},
  journal   = {Computer},
  volume    = {39},
  number    = {8},
  pages     = {57--65},
  abstract  = {Recording how scenes transform incident illumination into radiant light is an active topic in computational photography. Such techniques make it possible to create virtual images of a person or place from new viewpoints and in any form of illumination.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Callieri, Marco; Debevec, Paul; Scopigno, Roberto
A realtime immersive application with realistic lighting: The Parthenon Journal Article
In: Computers & Graphics, vol. 30, no. 3, pp. 368–376, 2006.
@article{callieri_realtime_2006,
  title     = {A realtime immersive application with realistic lighting: The {Parthenon}},
  author    = {Marco Callieri and Paul Debevec and Roberto Scopigno},
  url       = {http://ict.usc.edu/pubs/A%20realtime%20immersive%20application%20with%20realistic%20lighting-%20The%20Parthenon.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {Computers \& Graphics},
  volume    = {30},
  number    = {3},
  pages     = {368--376},
  abstract  = {Offline rendering techniques have nowadays reached an astonishing level of realism but pay the cost of long computational times. The new generation of programmable graphic hardware, on the other hand, gives the possibility to implement in realtime some of the visual effects previously available only for cinematographic production. We describe the design and implementation of an interactive system which is able to reproduce in realtime one of the crucial sequences from the short movie “The Parthenon” presented at Siggraph 2004. The application is designed to run on a specific immersive reality system, making possible for a user to perceive the virtual environment with nearly cinematographic visual quality.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering (2006), 2006.
@inproceedings{einarsson_relighting_2006,
  title     = {Relighting Human Locomotion with Flowed Reflectance Fields},
  author    = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
  year      = {2006},
  date      = {2006-06-01},
  booktitle = {Eurographics Symposium on Rendering (2006)},
  abstract  = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters Proceedings Article
In: 11th International Fall Workshop on Vision, Modeling and Visualization, Aachen, Germany, 2006.
@inproceedings{tariq_efficient_2006-1,
  title     = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters},
  author    = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
  url       = {http://ict.usc.edu/pubs/Efficient%20Estimation%20of%20Spatially%20Varying%20Subsurface%20Scattering%20Parameters.pdf},
  year      = {2006},
  date      = {2006-06-01},
  booktitle = {11th International Fall Workshop on Vision, Modeling and Visualization},
  address   = {Aachen, Germany},
  abstract  = {We present an image-based technique to efficiently acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase-shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set of structured light patterns is used to obtain face geometry. We subtract the minimum of each profile to remove the contribution of interreflected light from the rest of the face, and then match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the subsurface reflectance of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2006, 2006.
@techreport{tariq_efficient_2006,
  title       = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting},
  author      = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
  url         = {http://ict.usc.edu/pubs/ICT-TR-01-2006.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 01 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present an image-based technique to rapidly acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set contains structured light and is used to obtain face geometry. We match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Peers, Pieter; Hawkins, Tim; Debevec, Paul
A Reflective Light Stage Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2006, 2006.
@techreport{peers_reflective_2006,
  title       = {A Reflective Light Stage},
  author      = {Pieter Peers and Tim Hawkins and Paul Debevec},
  url         = {http://ict.usc.edu/pubs/ICT-TR-04.2006.pdf},
  year        = {2006},
  date        = {2006-01-01},
  number      = {ICT TR 04 2006},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present a novel acquisition device to capture high resolution 4D reflectance fields of real scenes. The device consists of a concave hemispherical surface coated with a rough specular paint and a digital video projector with a fish-eye lens positioned near the center of the hemisphere. The scene is placed near the projector, also near the center, and photographed from a fixed vantage point. The projector projects a high-resolution image of incident illumination which is reflected by the rough hemispherical surface to become the illumination on the scene. We demonstrate the utility of this device by capturing a high resolution hemispherical reflectance field of a specular object which would be difficult to capture using previous acquisition techniques.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Debevec, Paul
Capturing and Simulating Physically Accurate Illumination in Computer Graphics Proceedings Article
In: 11th Annual Symposium on Frontiers of Engineering, Niskayuna, NY, 2005.
@inproceedings{debevec_capturing_2005,
  title     = {Capturing and Simulating Physically Accurate Illumination in Computer Graphics},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Simulating%20Physically%20Accurate%20Illumination%20in%20Computer%20Graphics.pdf},
  year      = {2005},
  date      = {2005-09-01},
  booktitle = {11th Annual Symposium on Frontiers of Engineering},
  address   = {Niskayuna, NY},
  abstract  = {Anyone who has seen a recent summer blockbuster has witnessed the dramatic increases in computer-generated realism in recent years. Visual effects supervisors now report that bringing even the most challenging visions of film directors to the screen is no longer a question of what's possible; with today's techniques it is only a matter of time and cost. Driving this increase in realism have been computer graphics (CG) techniques for simulating how light travels within a scene and for simulating how light reflects off of and through surfaces. These techniques—some developed recently, and some originating in the 1980s—are being applied to the visual effects process by computer graphics artists who have found ways to channel the power of these new tools.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
A Median Cut Algorithm for Light Probe Sampling Proceedings Article
In: SIGGRAPH (Special Interest Group - Graphics), Los Angeles, CA, 2005.
@inproceedings{debevec_median_2005,
  title     = {A Median Cut Algorithm for Light Probe Sampling},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20Median%20Cut%20Algorithm%20for%20Light%20Probe%20Sampling.pdf},
  year      = {2005},
  date      = {2005-08-01},
  booktitle = {SIGGRAPH (Special Interest Group - Graphics)},
  address   = {Los Angeles, CA},
  abstract  = {We present a technique for approximating a light probe image as a constellation of light sources based on a median cut algorithm. The algorithm is efficient, simple to implement, and can realistically represent a complex lighting environment with as few as 64 point light sources.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Proceedings Article
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
@inproceedings{jones_performance_2005,
  title     = {Performance Geometry Capture for Spatially Varying Relighting},
  author    = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
  year      = {2005},
  date      = {2005-08-01},
  booktitle = {SIGGRAPH 2005 Sketch},
  address   = {Los Angeles, CA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
Acquisition of Time-Varying Participating Media Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2005.
@inproceedings{hawkins_acquisition_2005,
  title     = {Acquisition of Time-Varying Participating Media},
  author    = {Tim Hawkins and Per Einarsson and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Acquisition%20of%20Time-Varying%20Participating%20Media.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {SIGGRAPH},
  address   = {Los Angeles, CA},
  abstract  = {We present a technique for capturing time-varying volumetric data of participating media. A laser sheet is swept repeatedly through the volume, and the scattered light is imaged using a high-speed camera. Each sweep of the laser provides a near-simultaneous volume of density values. We demonstrate rendered animations under changing viewpoint and illumination, making use of measured values for the scattering phase function and albedo.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
A Dual Light Stage Proceedings Article
In: Dutré, Philip; Bala, Kavita (Ed.): Eurographics Symposium on Rendering, Konstanz, Germany, 2005.
@inproceedings{hawkins_dual_2005,
  title     = {A Dual Light Stage},
  author    = {Tim Hawkins and Per Einarsson and Paul Debevec},
  editor    = {Philip Dutré and Kavita Bala},
  url       = {http://ict.usc.edu/pubs/A%20Dual%20Light%20Stage.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Eurographics Symposium on Rendering},
  address   = {Konstanz, Germany},
  abstract  = {We present a technique for capturing high-resolution 4D reflectance fields using the reciprocity property of light transport. In our technique we place the object inside a diffuse spherical shell and scan a laser across its surface. For each incident ray, the object scatters a pattern of light onto the inner surface of the sphere, and we photograph the resulting radiance from the sphere's interior using a camera with a fisheye lens. Because of reciprocity, the image of the inside of the sphere corresponds to the reflectance function of the surface point illuminated by the laser, that is, the color that point would appear to a camera along the laser ray when the object is lit from each direction on the surface of the sphere. The measured reflectance functions allow the object to be photorealistically rendered from the laser's viewpoint under arbitrary directional illumination conditions. Since each captured reflectance function is a high-resolution image, our data reproduces sharp specular reflections and self-shadowing more accurately than previous approaches. We demonstrate our technique by scanning objects with a wide range of reflectance properties and show accurate renderings of the objects under novel illumination conditions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Poullis, Charalambos; Gardner, Andrew; Debevec, Paul
Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
@inproceedings{poullis_photogrammetric_2004,
  title     = {Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation},
  author    = {Charalambos Poullis and Andrew Gardner and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/PHOTOGRAMMETRIC%20MODELING%20AND%20IMAGE-BASED%20RENDERING%20FOR%20RAPID%20VIRTUAL%20ENVIRONMENT%20CREATION.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {For realistic simulations, architecture is one of the most important elements to model and render photorealistically. Current techniques of converting architectural plans or survey data to CAD models are labor intensive, and methods for rendering such models are generally not photorealistic. In this work, we present a new approach for modeling and rendering existing architectural scenes from a sparse set of still photographs. For modeling, we use photogrammetric modeling techniques to recover the geometric representation of the architecture. The photogrammetric modeling approach presented in this paper is effective, robust and powerful because it fully exploits structural symmetries and constraints which are characteristic of architectural scenes. For rendering, we use view-dependent texture mapping, a method for compositing multiple images of a scene to create renderings from novel views. Lastly, we present a software package, named Façade, which uses the techniques described to recover the geometry and appearance of architectural scenes directly from a sparse set of photographs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stumpfel, Jessi; Jones, Andrew; Wenger, Andreas; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Direct HDR Capture of the Sun and Sky Proceedings Article
In: Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa, Stellenbosch, South Africa, 2004.
@inproceedings{stumpfel_direct_2004,
  title     = {Direct {HDR} Capture of the Sun and Sky},
  author    = {Jessi Stumpfel and Andrew Jones and Andreas Wenger and Chris Tchou and Tim Hawkins and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Direct%20HDR%20Capture%20of%20the%20Sun%20and%20Sky.pdf},
  year      = {2004},
  date      = {2004-11-01},
  booktitle = {Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa},
  address   = {Stellenbosch, South Africa},
  abstract  = {We present a technique for capturing the extreme dynamic range of natural illumination environments that include the sun and sky, which has presented a challenge for traditional high dynamic range photography processes. We find that through careful selection of exposure times, aperture, and neutral density filters that this full range can be covered in seven exposures with a standard digital camera. We discuss the particular calibration issues such as lens vignetting, infrared sensitivity, and spectral transmission of neutral density filters which must be addressed. We present an adaptive exposure range adjustment technique for minimizing the number of exposures necessary. We demonstrate our results by showing time-lapse renderings of a complex scene illuminated by high-resolution, high dynamic range natural illumination environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul; Gardner, Andrew; Tchou, Chris; Hawkins, Tim
Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 05.2004, 2004.
@techreport{debevec_postproduction_2004,
  title       = {Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting},
  author      = {Paul Debevec and Andrew Gardner and Chris Tchou and Tim Hawkins},
  url         = {http://ict.usc.edu/pubs/Postproduction%20Re-Illumination%20of%20Live%20Action%20Using%20Time-Multiplexed%20Lighting.pdf},
  year        = {2004},
  date        = {2004-06-01},
  number      = {ICT TR 05.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {In this work, we present a technique for capturing a time-varying human performance in such a way that it can be re-illuminated in postproduction. The key idea is to illuminate the subject with a variety of rapidly changing time-multiplexed basis lighting conditions, and to record these lighting conditions with a fast enough video camera so that several or many different basis lighting conditions are recorded during the span of the final video's desired frame rate. In this poster we present two versions of such a system and propose plans for creating a complete, production-ready device.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Hawkins, Tim; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Goransson, Fredrik; Debevec, Paul
Animatable Facial Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering, Norrköping, Sweden, 2004.
@inproceedings{hawkins_animatable_2004,
  title     = {Animatable Facial Reflectance Fields},
  author    = {Tim Hawkins and Andreas Wenger and Chris Tchou and Andrew Gardner and Fredrik Goransson and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Animatable%20Facial%20Re%EF%AC%82ectance%20Fields.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Eurographics Symposium on Rendering},
  address   = {Norrköping, Sweden},
  abstract  = {We present a technique for creating an animatable image-based appearance model of a human face, able to capture appearance variation over changing facial expression, head pose, view direction, and lighting condition. Our capture process makes use of a specialized lighting apparatus designed to rapidly illuminate the subject sequentially from many different directions in just a few seconds. For each pose, the subject remains still while six video cameras capture their appearance under each of the directions of lighting. We repeat this process for approximately 60 different poses, capturing different expressions, visemes, head poses, and eye positions. The images for each of the poses and camera views are registered to each other semi-automatically with the help of fiducial markers. The result is a model which can be rendered realistically under any linear blend of the captured poses and under any desired lighting condition by warping, scaling, and blending data from the original images. Finally, we show how to drive the model with performance capture data, where the pose is not necessarily a linear combination of the original captured poses.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.