Publications
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Debevec, Paul
Driving High-Resolution Facial Scans with Video Performance Capture Journal Article
In: ACM Transactions on Graphics (TOG), vol. 34, no. 1, pp. 1–13, 2014.
@article{fyffe_driving_2014,
title = {Driving High-Resolution Facial Scans with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Scans%20with%20Video%20Performance%20Capture.pdf},
year = {2014},
date = {2014-11-01},
journal = {ACM Transactions on Graphics (TOG)},
volume = {34},
number = {1},
pages = {1--13},
abstract = {We present a process for rendering a realistic facial performance with control of viewpoint and illumination. The performance is based on one or more high-quality geometry and reflectance scans of an actor in static poses, driven by one or more video streams of a performance. We compute optical flow correspondences between neighboring video frames, and a sparse set of correspondences between static scans and video frames. The latter are made possible by leveraging the relightability of the static 3D scans to match the viewpoint(s) and appearance of the actor in videos taken in arbitrary environments. As optical flow tends to compute proper correspondence for some areas but not others, we also compute a smoothed, per-pixel confidence map for every computed flow, based on normalized cross-correlation. These flows and their confidences yield a set of weighted triangulation constraints among the static poses and the frames of a performance. Given a single artist-prepared face mesh for one static pose, we optimally combine the weighted triangulation constraints, along with a shape regularization term, into a consistent 3D geometry solution over the entire performance that is drift free by construction. In contrast to previous work, even partial correspondences contribute to drift minimization, for example, where a successful match is found in the eye region but not the mouth. Our shape regularization employs a differential shape term based on a spatially varying blend of the differential shapes of the static poses and neighboring dynamic poses, weighted by the associated flow confidences. These weights also permit dynamic reflectance maps to be produced for the performance by blending the static scan maps. Finally, as the geometry and maps are represented on a consistent artist-friendly mesh, we render the resulting high-quality animated face geometry and animated reflectance maps using standard rendering tools.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
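A minimal sketch of the per-pixel flow confidence described in the abstract: normalized cross-correlation between a frame and its flow-warped neighbor, smoothed into a confidence map. The window size and smoothing radius below are illustrative assumptions, not values from the paper.

import numpy as np
from scipy.ndimage import uniform_filter, gaussian_filter

def ncc_confidence(src, warped, win=7, smooth_sigma=3.0):
    # Per-pixel NCC between a source frame and a flow-warped neighbor,
    # smoothed into a confidence map in [0, 1]. Window size and smoothing
    # radius are illustrative, not the paper's values.
    src = src.astype(np.float64)
    warped = warped.astype(np.float64)
    mu_s, mu_w = uniform_filter(src, win), uniform_filter(warped, win)
    var_s = uniform_filter(src * src, win) - mu_s ** 2
    var_w = uniform_filter(warped * warped, win) - mu_w ** 2
    cov = uniform_filter(src * warped, win) - mu_s * mu_w
    ncc = cov / np.sqrt(np.maximum(var_s * var_w, 1e-12))
    return gaussian_filter(np.clip(ncc, 0.0, 1.0), smooth_sigma)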
von der Pahlen, Javier; Jimenez, Jorge; Danvoye, Etienne; Debevec, Paul; Fyffe, Graham; Alexander, Oleg
Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters Inproceedings
In: SIGGRAPH '14: ACM SIGGRAPH 2014 Courses, pp. 1–384, ACM Press, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2962-0.
@inproceedings{von_der_pahlen_digital_2014,
title = {Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters},
author = {Javier von der Pahlen and Jorge Jimenez and Etienne Danvoye and Paul Debevec and Graham Fyffe and Oleg Alexander},
url = {http://ict.usc.edu/pubs/Digial%20Ira%20and%20Beyond%20-%20Creating%20Photoreal%20Real-Time%20Digital%20Characters%20(course%20notes).pdf},
doi = {10.1145/2614028.2615407},
isbn = {978-1-4503-2962-0},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH '14: ACM SIGGRAPH 2014 Courses},
pages = {1--384},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {This course explains a complete process for creating next-generation real-time digital human characters, using the Digital Ira collaboration between USC ICT and Activision as an example, covering high-resolution facial scanning, blendshape rigging, video-based performance capture, animation compression, real-time skin and eye shading, hair, latest results, and future directions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man” Inproceedings
In: SIGGRAPH 2014: The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques, Vancouver, Canada, 2014.
@inproceedings{jones_creating_2014,
title = {Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man”},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Creating%20a%20life-sized%20automulitscopic%20Morgan%20Spurlock%20for%20CNNs%20%e2%80%9cInside%20Man%e2%80%9d%20(abstract).pdf},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH 2014: The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Vancouver, Canada},
abstract = {We present a system for capturing and rendering life-size 3D human subjects on an automultiscopic display. Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Alexander, Oleg; Barbic, Jernej; Debevec, Paul
Measurement and Modeling of Microfacet Distributions under Deformation Inproceedings
In: ACM SIGGRAPH 2014 Talks, ACM, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2960-6.
@inproceedings{nagano_measurement_2014,
title = {Measurement and Modeling of Microfacet Distributions under Deformation},
author = {Koki Nagano and Oleg Alexander and Jernej Barbic and Paul Debevec},
url = {http://ict.usc.edu/pubs/Measurement%20and%20Modeling%20of%20Microfacet%20Distribution%20under%20Deformation%20(abstract%20for%20talk).pdf},
doi = {10.1145/2614106.2614124},
isbn = {978-1-4503-2960-6},
year = {2014},
date = {2014-08-01},
booktitle = {ACM SIGGRAPH 2014 Talks},
publisher = {ACM},
address = {Vancouver, British Columbia, Canada},
abstract = {We endeavor to model dynamic microfacet distributions of rough surfaces such as skin to simulate the changes in surface BRDF under stretching and compression. We begin by measuring microfacet distributions at 5-micron scale of several surface patches under controlled deformation. Generally speaking, rough surfaces become flatter and thus shinier as they are pulled tighter, and become rougher under compression. From this data, we build a model of how surface reflectance changes as the material deforms. We then simulate dynamic surface reflectance by modifying the anisotropic roughness parameters of a microfacet distribution model in accordance with animated surface deformations. Furthermore, we directly render such dynamic appearance by driving dynamic micro geometries to demonstrate how they influence the meso-scale surface reflectance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
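The qualitative behavior reported above (surfaces become shinier when stretched, rougher when compressed) can be sketched by scaling the anisotropic roughness parameters of a microfacet model with the local stretch. The linear scaling rule below is an assumption for illustration, not the model fitted to the 5-micron measurements.

def deformed_roughness(alpha_u, alpha_v, stretch_u, stretch_v):
    # stretch > 1 means the surface is pulled tighter along that axis
    # (flatter, shinier, so smaller roughness); stretch < 1 means compression
    # (rougher). Dividing roughness by the stretch factor is an illustrative
    # linear rule, not the fitted model from the measured data.
    return alpha_u / stretch_u, alpha_v / stretch_v

# Example: a skin patch stretched 20% along u and compressed 10% along v.
print(deformed_roughness(0.35, 0.35, 1.2, 0.9))  # u gets smoother, v rougher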
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
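The hybrid-parallax idea above can be pictured as smooth interpolation of tracked viewer heights across the display's field of view, so that imagery rendered toward any angle uses a vertical perspective blended from nearby viewers. The Gaussian angular weighting in this sketch is an assumption, not the paper's exact per-vertex scheme.

import numpy as np

def interpolated_viewer_height(theta, viewer_angles, viewer_heights, width=10.0):
    # theta: query view angles in degrees; viewer_angles / viewer_heights:
    # the tracked viewers. Gaussian weighting in angle is illustrative only.
    theta = np.atleast_1d(np.asarray(theta, dtype=float))
    d = theta[:, None] - np.asarray(viewer_angles, dtype=float)[None, :]
    w = np.exp(-0.5 * (d / width) ** 2)
    w /= w.sum(axis=1, keepdims=True)
    return w @ np.asarray(viewer_heights, dtype=float)

# Two tracked viewers: one at -20 deg with eye height 1.6 m, one at +30 deg
# with eye height 1.2 m; query three directions across the field of view.
print(interpolated_viewer_height([-20.0, 5.0, 30.0], [-20.0, 30.0], [1.6, 1.2]))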
Artstein, Ron; Smith, Stephen; Traum, David; Alexander, Oleg; Leuski, Anton; Jones, Andrew; Georgila, Kallirroi; Debevec, Paul; Swartout, William; Maio, Heather
Time-offset Interaction with a Holocaust Survivor Inproceedings
In: Proceedings of IUI 2014, pp. 163–168, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2184-6.
@inproceedings{artstein_time-offset_2014,
title = {Time-offset Interaction with a Holocaust Survivor},
author = {Ron Artstein and Stephen Smith and David Traum and Oleg Alexander and Anton Leuski and Andrew Jones and Kallirroi Georgila and Paul Debevec and William Swartout and Heather Maio},
url = {http://ict.usc.edu/pubs/Time-Offset%20Interaction%20with%20a%20Holocaust%20Survivor.pdf},
doi = {10.1145/2557500.2557540},
isbn = {978-1-4503-2184-6},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of IUI 2014},
pages = {163--168},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {Time-offset interaction is a new technology that allows for two-way communication with a person who is not available for conversation in real time: a large set of statements are prepared in advance, and users access these statements through natural conversation that mimics face-to-face interaction. Conversational reactions to user questions are retrieved through a statistical classifier, using technology that is similar to previous interactive systems with synthetic characters; however, all of the retrieved utterances are genuine statements by a real person. Recordings of answers, listening and idle behaviors, and blending techniques are used to create a persistent visual image of the person throughout the interaction. A proof-of-concept has been implemented using the likeness of Pinchas Gutter, a Holocaust survivor, enabling short conversations about his family, his religious views, and resistance. This proof-of-concept has been shown to dozens of people, from school children to Holocaust scholars, with many commenting on the impact of the experience and potential for this kind of interface.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
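The retrieval step, mapping a spoken question to one of the prepared statements, can be illustrated with a toy ranker. TF-IDF cosine similarity and the placeholder statement texts below are stand-ins for the statistical classifier and recorded answers in the deployed system.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical placeholders; the real system ranks a large set of recordings.
statements = [
    "a prepared statement about his family",
    "a prepared statement about his religious views",
    "a prepared statement about resistance",
]
vec = TfidfVectorizer().fit(statements)
S = vec.transform(statements)

def answer(question, threshold=0.05):
    # Return the best-matching prepared statement, or an off-topic fallback.
    sims = cosine_similarity(vec.transform([question]), S).ravel()
    best = sims.argmax()
    return statements[best] if sims[best] > threshold else "Could you rephrase that?"

print(answer("Tell me about your family"))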
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Graham, Paul; Nagano, Koki; Busch, Jay; Debevec, Paul
Driving High-Resolution Facial Blendshapes with Video Performance Capture Inproceedings
In: SIGGRAPH, Anaheim, CA, 2013.
@inproceedings{fyffe_driving_2013,
title = {Driving High-Resolution Facial Blendshapes with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Graham and Koki Nagano and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Blendshapes%20with%20Video%20Performance.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH},
address = {Anaheim, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Jones, Andrew; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
An Autostereoscopic Projector Array Optimized for 3D Facial Display Inproceedings
In: SIGGRAPH 2013 Emerging Technologies, 2013.
@inproceedings{nagano_autostereoscopic_2013,
title = {An Autostereoscopic Projector Array Optimized for 3D Facial Display},
author = {Koki Nagano and Andrew Jones and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Autostereoscopic%20Projector%20Array%20Optimized%20for%203D%20Facial%20Display%20.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Emerging Technologies},
abstract = {Video projectors are rapidly shrinking in size, power consumption, and cost. Such projectors provide unprecedented flexibility to stack, arrange, and aim pixels without the need for moving parts. This dense projector display is optimized in size and resolution to display an autostereoscopic life-sized 3D human face. It utilizes 72 Texas Instruments PICO projectors to illuminate a 30 cm x 30 cm anisotropic screen with a wide 110-degree field of view. The demonstration includes both live scanning of subjects and virtual animated characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alexander, Oleg; Fyffe, Graham; Busch, Jay; Yu, Xueming; Ichikari, Ryosuke; Jones, Andrew; Debevec, Paul; Jimenez, Jorge; Danvoye, Etienne; Antoniazzi, Bernardo; Eheler, Mike; Kysela, Zbynek; von der Pahlen, Javier
Digital Ira: Creating a Real-Time Photoreal Digital Actor Inproceedings
In: SIGGRAPH Real Time Live!, Anaheim, CA, 2013, ISBN: 978-1-4503-2342-0.
@inproceedings{alexander_digital_2013,
title = {Digital Ira: Creating a Real-Time Photoreal Digital Actor},
author = {Oleg Alexander and Graham Fyffe and Jay Busch and Xueming Yu and Ryosuke Ichikari and Andrew Jones and Paul Debevec and Jorge Jimenez and Etienne Danvoye and Bernardo Antoniazzi and Mike Eheler and Zbynek Kysela and Javier von der Pahlen},
url = {http://dl.acm.org/citation.cfm?doid=2503385.2503387},
doi = {10.1145/2503385.2503387},
isbn = {978-1-4503-2342-0},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH Real Time Live!},
address = {Anaheim, CA},
abstract = {In 2008, the "Digital Emily" project [Alexander et al. 2009] showed how a set of high-resolution facial expressions scanned in a light stage could be rigged into a real-time photoreal digital character and driven with video-based facial animation techniques. However, Digital Emily was rendered offline, involved just the front of the face, and was never seen in a tight closeup. In this collaboration between Activision and USC ICT shown at SIGGRAPH 2013's Real-Time Live venue, we endeavoured to create a real-time, photoreal digital human character which could be seen from any viewpoint, in any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we wanted this to run in a real-time game-ready production pipeline, ultimately achieving 180 frames per second for a full-screen character on a two-year-old graphics card.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tunwattanapong, Borom; Fyffe, Graham; Graham, Paul; Busch, Jay; Yu, Xueming; Ghosh, Abhijeet; Debevec, Paul
Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination Journal Article
In: ACM Transactions on Graphics, vol. 32, no. 4, 2013, ISSN: 0730-0301.
@article{tunwattanapong_acquiring_2013,
title = {Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination},
author = {Borom Tunwattanapong and Graham Fyffe and Paul Graham and Jay Busch and Xueming Yu and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Acquiring%20Re%ef%ac%82ectance%20and%20Shape%20from%20Continuous%20Spherical%20Harmonic%20Illumination.pdf},
doi = {10.1145/2461912.2461944},
issn = {0730-0301},
year = {2013},
date = {2013-07-01},
journal = {ACM Transactions on Graphics},
volume = {32},
number = {4},
abstract = {We present a novel technique for acquiring the geometry and spatially-varying reflectance properties of 3D objects by observing them under continuous spherical harmonic illumination conditions. The technique is general enough to characterize either entirely specular or entirely diffuse materials, or any varying combination across the surface of the object. We employ a novel computational illumination setup consisting of a rotating arc of controllable LEDs which sweep out programmable spheres of incident illumination during 1-second exposures. We illuminate the object with a succession of spherical harmonic illumination conditions, as well as photographed environmental lighting for validation. From the response of the object to the harmonics, we can separate diffuse and specular reflections, estimate world-space diffuse and specular normals, and compute anisotropic roughness parameters for each view of the object. We then use the maps of both diffuse and specular reflectance to form correspondences in a multiview stereo algorithm, which allows even highly specular surfaces to be corresponded across views. The algorithm yields a complete 3D model and a set of merged reflectance maps. We use this technique to digitize the shape and reflectance of a variety of objects difficult to acquire with other techniques and present validation renderings which match well to photographs in similar lighting.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
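The harmonic lighting conditions swept out by the LED arc are the standard real spherical harmonic basis functions evaluated over incident directions; bands 0 through 2 in their usual Cartesian form are shown below (the constants are textbook values, not anything specific to this rig).

import numpy as np

def sh_basis_l2(d):
    # Real spherical harmonics, bands 0-2, for unit direction vectors d (N, 3).
    x, y, z = d[:, 0], d[:, 1], d[:, 2]
    return np.stack([
        0.282095 * np.ones_like(x),    # Y_0^0
        0.488603 * y,                  # Y_1^-1
        0.488603 * z,                  # Y_1^0
        0.488603 * x,                  # Y_1^1
        1.092548 * x * y,              # Y_2^-2
        1.092548 * y * z,              # Y_2^-1
        0.315392 * (3 * z * z - 1),    # Y_2^0
        1.092548 * x * z,              # Y_2^1
        0.546274 * (x * x - y * y),    # Y_2^2
    ], axis=1)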
Alexander, Oleg; Busch, Jay; Graham, Paul; Tunwattanapong, Borom; Jones, Andrew; Nagano, Koki; Ichikari, Ryosuke; Debevec, Paul; Fyffe, Graham
Digital Ira: High-Resolution Facial Performance Playback Inproceedings
In: SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques, Anaheim, CA, 2013.
@inproceedings{alexander_digital_2013-1,
title = {Digital Ira: High-Resolution Facial Performance Playback},
author = {Oleg Alexander and Jay Busch and Paul Graham and Borom Tunwattanapong and Andrew Jones and Koki Nagano and Ryosuke Ichikari and Paul Debevec and Graham Fyffe},
url = {http://gl.ict.usc.edu/Research/DigitalIra/},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Anaheim, CA},
abstract = {In this collaboration between Activision and USC ICT, we tried to create a real-time, photoreal digital human character which could be seen from any viewpoint, any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we needed this to run in a game-ready production pipeline. To achieve this, we scanned the actor in thirty high-resolution expressions using the USC ICT's new Light Stage X system [Ghosh et al. SIGGRAPHAsia2011] and chose eight expressions for the real-time performance rendering. To record the performance, we shot multi-view 30fps video of the actor performing improvised lines using the same multi-camera rig. We used a new tool called Vuvuzela to interactively and precisely correspond all expression (u,v)'s to the neutral expression, which was retopologized to an artist mesh. Our new offline animation solver works by creating a performance graph representing dense GPU optical flow between the video frames and the eight expressions. This graph gets pruned by analyzing the correlation between the video frames and the expression scans over twelve facial regions. The algorithm then computes dense optical flow and 3D triangulation yielding per-frame spatially varying blendshape weights approximating the performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
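The last step, per-frame blendshape weights approximating the performance, can be sketched as a non-negative least-squares fit of expression deltas to flow-derived vertex targets. This global fit is a simplified stand-in for the solver's spatially varying, per-region weighting.

import numpy as np
from scipy.optimize import nnls

def solve_blend_weights(neutral, expressions, target):
    # Fit w >= 0 so that neutral + sum_i w_i * (expression_i - neutral)
    # best approximates the target vertices for one frame.
    # neutral, target: (V, 3); expressions: list of (V, 3) scans.
    deltas = np.stack([(e - neutral).ravel() for e in expressions], axis=1)
    w, _ = nnls(deltas, (target - neutral).ravel())
    return w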
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Inproceedings
In: EUROGRAPHICS, Girona, Spain, 2013.
@inproceedings{graham_measurement-based_2013,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2013},
date = {2013-05-01},
booktitle = {EUROGRAPHICS},
address = {Girona, Spain},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
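As a rough picture of constrained texture synthesis at this scale, the sketch below tiles the mesostructure map and greedily copies the exemplar patch whose blurred (meso-level) content best matches each tile. This nearest-patch rule is a much simpler stand-in for the image-analogies synthesis the work builds on.

import numpy as np
from scipy.ndimage import gaussian_filter

def synthesize_microstructure(meso, exemplar, patch=16, sigma=4.0, tries=200):
    # meso: mesostructure-level map; exemplar: high-resolution skin patch.
    # For each output tile, copy the random exemplar patch whose blurred
    # content best matches the tile, so synthesized microstructure stays
    # consistent with the scanned mesostructure. Patch size, blur, and the
    # number of candidates are illustrative choices.
    rng = np.random.default_rng(0)
    blurred_ex = gaussian_filter(exemplar, sigma)
    eh, ew = exemplar.shape
    out = np.zeros_like(meso)
    for i in range(0, meso.shape[0] - patch + 1, patch):
        for j in range(0, meso.shape[1] - patch + 1, patch):
            tile = meso[i:i + patch, j:j + patch]
            best_xy, best_err = (0, 0), np.inf
            for _ in range(tries):
                y = rng.integers(0, eh - patch)
                x = rng.integers(0, ew - patch)
                err = np.sum((blurred_ex[y:y + patch, x:x + patch] - tile) ** 2)
                if err < best_err:
                    best_xy, best_err = (y, x), err
            y, x = best_xy
            out[i:i + patch, j:j + patch] = exemplar[y:y + patch, x:x + patch]
    return out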
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Technical Report
University of Southern California Institute for Creative Technologies, no. ICT TR 01 2012, 2012.
@techreport{graham_measurement-based_2012,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2012.pdf},
year = {2012},
date = {2012-11-01},
number = {ICT TR 01 2012},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a technique for generating microstructure-level facial geometry by augmenting a mesostructure-level facial scan with detail synthesized from a set of exemplar skin patches scanned at much higher resolution. We use constrained texture synthesis based on image analogies to increase the resolution of the facial scan in a way that is consistent with the scanned mesostructure. We digitize the exemplar patches with a polarization-based computational illumination technique which considers specular reflection and single scattering. The recorded microstructure patches can be used to synthesize full-facial microstructure detail for either the same subject or to a different subject. We show that the technique allows for greater realism in facial renderings including more accurate reproduction of skin’s specular roughness and anisotropic reflection effects.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Debevec, Paul
The Light Stages and Their Applications to Photoreal Digital Actors Inproceedings
In: SIGGRAPH Asia, Singapore, 2012.
@inproceedings{debevec_light_2012,
title = {The Light Stages and Their Applications to Photoreal Digital Actors},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/The%20Light%20Stages%20and%20Their%20Applications%20to%20Photoreal%20Digital%20Actors.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {SIGGRAPH Asia},
address = {Singapore},
abstract = {The Light Stage systems built at UC Berkeley and USC ICT have enabled a variety of facial scanning and reflectance measurement techniques that have been explored in several research papers and used in various commercial applications. This short paper presents the evolutionary history of the Light Stage Systems and some of the techniques and applications they have enabled.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Graham, Paul; Busch, Jay; Bolas, Mark
A Cell Phone Based Platform for Facial Performance Capture Inproceedings
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
@inproceedings{debevec_cell_2012,
title = {A Cell Phone Based Platform for Facial Performance Capture},
author = {Paul Debevec and Paul Graham and Jay Busch and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Cell%20Phone%20Based%20Platform%20for%20Facial%20Performance%20Capture.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Yufeng; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination Inproceedings
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), 2012.
@inproceedings{zhu_estimating_2012,
title = {Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination},
author = {Yufeng Zhu and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Estimating%20Diffusion%20Parameters%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
abstract = {Accurately modeling and reproducing the appearance of real-world materials is crucial for the production of photoreal imagery of digital scenes and subjects. The appearance of many common materials is the result of subsurface light transport that gives rise to the characteristic “soft” appearance and the unique coloring of such materials. Jensen et al. [2001] introduced the dipole-diffusion approximation to efficiently model isotropic subsurface light transport. The scattering parameters needed to drive the dipole-diffusion approximation are typically estimated by illuminating a homogeneous surface patch with a collimated beam of light, or in the case of spatially varying translucent materials with a dense set of structured light patterns. A disadvantage of most existing techniques is that acquisition time is traded off with spatial density of the scattering parameters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
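The dipole-diffusion approximation cited above has a closed-form diffuse reflectance profile, reproduced here after Jensen et al. [2001]; parameter names follow the usual convention, and the default index of refraction is an assumption.

import numpy as np

def dipole_Rd(r, sigma_a, sigma_s_prime, eta=1.3):
    # Diffuse reflectance R_d(r) at distance r from a collimated beam on a
    # semi-infinite homogeneous slab (dipole-diffusion approximation).
    sigma_t_prime = sigma_a + sigma_s_prime
    alpha_prime = sigma_s_prime / sigma_t_prime
    sigma_tr = np.sqrt(3.0 * sigma_a * sigma_t_prime)
    Fdr = -1.440 / eta**2 + 0.710 / eta + 0.668 + 0.0636 * eta
    A = (1.0 + Fdr) / (1.0 - Fdr)            # boundary mismatch term
    z_r = 1.0 / sigma_t_prime                # depth of the real source
    z_v = z_r * (1.0 + 4.0 / 3.0 * A)        # height of the virtual source
    d_r = np.sqrt(r * r + z_r * z_r)
    d_v = np.sqrt(r * r + z_v * z_v)
    return (alpha_prime / (4.0 * np.pi)) * (
        z_r * (sigma_tr * d_r + 1.0) * np.exp(-sigma_tr * d_r) / d_r**3
        + z_v * (sigma_tr * d_v + 1.0) * np.exp(-sigma_tr * d_v) / d_v**3)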
Graham, Paul; Busch, Jay; Bolas, Mark; Debevec, Paul
A Single-Shot Light Probe Inproceedings
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
@inproceedings{graham_single-shot_2012,
title = {A Single-Shot Light Probe},
author = {Paul Graham and Jay Busch and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Single-Shot%20Light%20Probe.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
abstract = {We demonstrate a novel light probe which can estimate the full dynamic range of a scene with multiple bright light sources. It places diffuse strips between mirrored spherical quadrants, effectively co-locating diffuse and mirrored probes to record the full dynamic range of illumination in a single exposure. From this image, we estimate the intensity of multiple saturated light sources by solving a linear system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
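The last sentence describes a small linear inverse problem: each unsaturated diffuse-strip pixel is a known linear combination of the unknown source intensities, so a least-squares solve recovers them. The matrix and pixel values below are placeholders, not calibration data from the probe.

import numpy as np

# pixel_i = sum_j L[i, j] * intensity[j], where L encodes the known cosine
# falloff from source j at diffuse-strip point i (from the probe geometry).
L = np.array([[0.9, 0.1],
              [0.5, 0.5],
              [0.2, 0.8],
              [0.7, 0.3]])          # 4 diffuse pixels, 2 saturated sources
p = np.array([9.5, 7.0, 6.1, 8.4])  # observed diffuse pixel values
intensity, *_ = np.linalg.lstsq(L, p, rcond=None)
print(intensity)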
Meseth, Jan; Hempel, Shawn; Weidlich, Andrea; Fyffe, Graham; Miller, Craig; Carroll, Paul; Debevec, Paul; Fyffe, Lynn
Improved Linear-Light-Source Material Reflectance Scanning Inproceedings
In: ACM SIGGRAPH 2012 Talks, 2012.
@inproceedings{meseth_improved_2012,
title = {Improved Linear-Light-Source Material Reflectance Scanning},
author = {Jan Meseth and Shawn Hempel and Andrea Weidlich and Graham Fyffe and Craig Miller and Paul Carroll and Paul Debevec and Lynn Fyffe},
url = {http://ict.usc.edu/pubs/Improved%20Linear-Light-Source%20Material%20Reflectance%20Scanning.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks},
abstract = {We improve the resolution, accuracy, and efficiency of Linear Light Source (LLS) Reflectometry with several acquisition setup and data processing improvements, allowing spatially varying reflectance parameters of complex materials to be recorded with unprecedented accuracy and efficiency.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Inproceedings
In: ACM SIGGRAPH 2012 Talks (SIGGRAPH '12), Los Angeles, CA, 2012.
@inproceedings{graham_measurement-based_2012-1,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/A%20Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks (SIGGRAPH '12)},
address = {Los Angeles, CA},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham
High Fidelity Facial Hair Capture Technical Report
University of Southern California Institute for Creative Technologies, Playa Vista, CA, no. ICT TR 02 2012, 2012.
@techreport{fyffe_high_2012,
title = {High Fidelity Facial Hair Capture},
author = {Graham Fyffe},
url = {http://ict.usc.edu/pubs/High%20Fidelity%20Facial%20Hair%20Capture-TR.pdf},
year = {2012},
date = {2012-08-01},
number = {ICT TR 02 2012},
address = {Playa Vista, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We propose an extension to multi-view face capture that reconstructs high quality facial hair automatically. Multi-view stereo is well known for producing high quality smooth surfaces and meshes, but fails on fine structure such as hair. We exploit this failure, and automatically detect the hairs on a face by careful analysis of the pixel reconstruction error of the multi-view stereo result. Central to our work is a novel stereo matching cost function, which we call equalized cross correlation, that properly accounts for both camera sensor noise and pixel sampling variance. In contrast to previous works that treat hair modeling as a synthesis problem based on image cues, we reconstruct facial hair to explain the same highresolution input photographs used for face reconstruction, producing a result with higher fidelity to the input photographs.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
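The equalized cross correlation above accounts for sensor noise and pixel sampling variance; the sketch below is only a plain zero-mean NCC with a noise-variance floor in the denominator, which captures the spirit (textureless patches stop looking confident) but is not the paper's exact cost function.

import numpy as np

def noise_aware_ncc(a, b, sigma_n=2.0):
    # Zero-mean NCC between two patches, with an assumed per-pixel sensor
    # noise standard deviation sigma_n regularizing the denominator.
    a = a.astype(np.float64).ravel()
    b = b.astype(np.float64).ravel()
    a -= a.mean()
    b -= b.mean()
    n = a.size
    denom = np.sqrt((a @ a + n * sigma_n**2) * (b @ b + n * sigma_n**2))
    return (a @ b) / denom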
Debevec, Paul; Hawkins, Tim; Tchou, Chris; Duiker, Haarm-Pieter; Sarokin, Westley
Acquiring the Reflectance Field of a Human Face Inproceedings
In: SIGGRAPH, New Orleans, LA, 2000.
@inproceedings{debevec_acquiring_2000,
title = {Acquiring the Reflectance Field of a Human Face},
author = {Paul Debevec and Tim Hawkins and Chris Tchou and Haarm-Pieter Duiker and Westley Sarokin},
url = {http://ict.usc.edu/pubs/Acquiring%20the%20Re%EF%AC%82ectance%20Field%20of%20a%20Human%20Face.pdf},
year = {2000},
date = {2000-07-01},
booktitle = {SIGGRAPH},
address = {New Orleans, LA},
abstract = {We present a method to acquire the reflectance field of a human face and use these measurements to render the face under arbitrary changes in lighting and viewpoint. We first acquire images of the face from a small set of viewpoints under a dense sampling of incident illumination directions using a light stage. We then construct a reflectance function image for each observed image pixel from its values over the space of illumination directions. From the reflectance functions, we can directly generate images of the face from the original viewpoints in any form of sampled or computed illumination. To change the viewpoint, we use a model of skin reflectance to estimate the appearance of the reflectance functions for novel viewpoints. We demonstrate the technique with synthetic renderings of a person's face under novel illumination and viewpoints.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
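Because light transport is linear, relighting from a recorded reflectance field reduces to a weighted sum of the light-stage basis images. A minimal sketch, with the array shapes as assumptions:

import numpy as np

def relight(basis_images, env_weights):
    # basis_images: (D, H, W, 3), one photograph per incident light direction;
    # env_weights: (D, 3), the novel environment's RGB intensity sampled in
    # each of those directions (scaled by solid angle). The relit image is
    # the weighted sum over directions, per color channel.
    return np.einsum('dc,dhwc->hwc', env_weights, basis_images)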