Publications
Fyffe, Graham; Debevec, Paul
Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination Proceedings Article
In: Proceedings of ICCP 2015, pp. 1–10, IEEE, Houston, Texas, 2015.
@inproceedings{fyffe_single-shot_2015,
title = {Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination},
author = {Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Single-Shot%20Reflectance%20Measurement%20from%20Polarized%20Color%20Gradient%20Illumination.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICCP 2015},
pages = {1–10},
publisher = {IEEE},
address = {Houston, Texas},
abstract = {We present a method for acquiring the per-pixel diffuse albedo, specular albedo, and surface normal maps of a subject at a single instant in time. The method is single shot, requiring no optical flow, and per-pixel, making no assumptions regarding albedo statistics or surface connectivity. We photograph the subject inside a spherical illumination device emitting a static lighting pattern of vertically polarized RGB color gradients aligned with the XYZ axes, and horizontally polarized RGB color gradients inversely aligned with the XYZ axes. We capture simultaneous photographs using one of two possible setups: a single view setup using a coaxially aligned camera pair with a polarizing beam splitter, and a multi-view stereo setup with different orientations of linear polarizing filters placed on the cameras, enabling high-quality geometry reconstruction. From this lighting we derive full-color diffuse albedo, single-channel specular albedo suitable for dielectric materials, and polarization-preserving surface normals which are free of corruption from subsurface scattering. We provide simple formulae to estimate the diffuse albedo, specular albedo, and surface normal maps in the single-view and multi-view cases and show error bounds which are small for many common subjects including faces.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
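The abstract above refers to simple per-pixel formulae derived from gradient illumination. As a rough illustration of the underlying spherical-gradient principle only (not the paper's exact polarized single-shot derivation), a minimal sketch of per-pixel normal estimation from separate X/Y/Z gradient and full-on images might look like the following; the function name and the [0, 1] gradient convention are assumptions.

import numpy as np

def normals_from_gradients(I_x, I_y, I_z, I_full, eps=1e-6):
    """Per-pixel surface normals from spherical gradient illumination (sketch).

    I_x, I_y, I_z : (H, W) responses under linear gradients aligned with X, Y, Z
    I_full        : (H, W) response under constant full-on illumination
    Returns unit normals of shape (H, W, 3).
    """
    # Under a gradient c(w) = 0.5 * (1 + w . axis), the ratio of the gradient
    # response to the full-on response gives the normal component remapped to
    # [0, 1]; undo the remap and normalize per pixel.
    nx = 2.0 * I_x / (I_full + eps) - 1.0
    ny = 2.0 * I_y / (I_full + eps) - 1.0
    nz = 2.0 * I_z / (I_full + eps) - 1.0
    n = np.stack([nx, ny, nz], axis=-1)
    return n / (np.linalg.norm(n, axis=-1, keepdims=True) + eps)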
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Building a Life-Size Automultiscopic Display Using Consumer Hardware Proceedings Article
In: Proceedings of GPU Technology Conference, San Jose, CA, 2015.
@inproceedings{jones_building_2015,
title = {Building a Life-Size Automultiscopic Display Using Consumer Hardware},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Building%20a%20Life-Size%20Automultiscopic%20Display%20Using%20Consumer%20Hardware.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of GPU Technology Conference},
address = {San Jose, CA},
abstract = {Automultiscopic displays allow multiple users to experience 3D content without the hassle of special glasses or head gear. Such displays generate many simultaneous images with high-angular density, so that each eye perceives a distinct and different view. This presents a unique challenge for content acquisition and rendering. In this talk, we explain how to build an automultiscopic display using off-the-shelf projectors, video-splitters, and graphics cards. We also present a GPU-based algorithm for rendering a large number of views from a sparse array of video cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134–134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
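The abstract above centers on producing a blendshape model from captured expressions. As a minimal, generic sketch of how a delta blendshape model is typically evaluated (this is standard practice, not the paper's capture or registration pipeline; names are assumptions):

import numpy as np

def evaluate_blendshapes(neutral, shapes, weights):
    """Delta blendshape evaluation: neutral vertices plus a weighted sum of
    per-expression offsets.

    neutral : (V, 3) neutral-pose vertex positions
    shapes  : (K, V, 3) vertex positions of the K captured expressions
    weights : (K,) blend weights, typically in [0, 1]
    """
    deltas = shapes - neutral[None, :, :]           # per-expression offsets
    return neutral + np.tensordot(weights, deltas, axes=1)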
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Debevec, Paul
Driving High-Resolution Facial Scans with Video Performance Capture Journal Article
In: ACM Transactions on Graphics (TOG), vol. 34, no. 1, pp. 1–13, 2014.
@article{fyffe_driving_2014,
title = {Driving High-Resolution Facial Scans with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Scans%20with%20Video%20Performance%20Capture.pdf},
year = {2014},
date = {2014-11-01},
journal = {ACM Transactions on Graphics (TOG)},
volume = {34},
number = {1},
pages = {1–13},
abstract = {We present a process for rendering a realistic facial performance with control of viewpoint and illumination. The performance is based on one or more high-quality geometry and reflectance scans of an actor in static poses, driven by one or more video streams of a performance. We compute optical flow correspondences between neighboring video frames, and a sparse set of correspondences between static scans and video frames. The latter are made possible by leveraging the relightability of the static 3D scans to match the viewpoint(s) and appearance of the actor in videos taken in arbitrary environments. As optical flow tends to compute proper correspondence for some areas but not others, we also compute a smoothed, per-pixel confidence map for every computed flow, based on normalized cross-correlation. These flows and their confidences yield a set of weighted triangulation constraints among the static poses and the frames of a performance. Given a single artist-prepared face mesh for one static pose, we optimally combine the weighted triangulation constraints, along with a shape regularization term, into a consistent 3D geometry solution over the entire performance that is drift free by construction. In contrast to previous work, even partial correspondences contribute to drift minimization, for example, where a successful match is found in the eye region but not the mouth. Our shape regularization employs a differential shape term based on a spatially varying blend of the differential shapes of the static poses and neighboring dynamic poses, weighted by the associated flow confidences. These weights also permit dynamic reflectance maps to be produced for the performance by blending the static scan maps. Finally, as the geometry and maps are represented on a consistent artist-friendly mesh, we render the resulting high-quality animated face geometry and animated reflectance maps using standard rendering tools.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
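The abstract above weights each optical flow by a per-pixel confidence based on normalized cross-correlation. A hedged sketch of a windowed NCC confidence map between a flow-warped source image and the target frame is given below; the window size and the use of scipy's uniform_filter are assumptions, and the paper additionally smooths the result.

import numpy as np
from scipy.ndimage import uniform_filter

def ncc_confidence(warped, target, window=15, eps=1e-6):
    """Per-pixel normalized cross-correlation in [-1, 1], usable as a flow
    confidence map.

    warped, target : grayscale float arrays of identical shape
    window         : side length of the local comparison window (pixels)
    """
    mu_w = uniform_filter(warped, window)
    mu_t = uniform_filter(target, window)
    dw, dt = warped - mu_w, target - mu_t
    cov = uniform_filter(dw * dt, window)
    var_w = uniform_filter(dw * dw, window)
    var_t = uniform_filter(dt * dt, window)
    return cov / (np.sqrt(var_w * var_t) + eps)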
Nagano, Koki; Alexander, Oleg; Barbic, Jernej; Debevec, Paul
Measurement and Modeling of Microfacet Distributions under Deformation Proceedings Article
In: Proceedings of SIGGRAPH 2014 Talks, ACM, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2960-6.
@inproceedings{nagano_measurement_2014,
title = {Measurement and Modeling of Microfacet Distributions under Deformation},
author = {Koki Nagano and Oleg Alexander and Jernej Barbic and Paul Debevec},
url = {http://ict.usc.edu/pubs/Measurement%20and%20Modeling%20of%20Microfacet%20Distribution%20under%20Deformation%20(abstract%20for%20talk).pdf},
doi = {10.1145/2614106.2614124},
isbn = {978-1-4503-2960-6},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of SIGGRAPH 2014 Talks},
publisher = {ACM},
address = {Vancouver, British Columbia, Canada},
abstract = {We endeavor to model dynamic microfacet distributions of rough surfaces such as skin to simulate the changes in surface BRDF under stretching and compression. We begin by measuring microfacet distributions at 5-micron scale of several surface patches under controlled deformation. Generally speaking, rough surfaces become flatter and thus shinier as they are pulled tighter, and become rougher under compression. From this data, we build a model of how surface reflectance changes as the material deforms. We then simulate dynamic surface reflectance by modifying the anisotropic roughness parameters of a microfacet distribution model in accordance with animated surface deformations. Furthermore, we directly render such dynamic appearance by driving dynamic micro geometries to demonstrate how they influence the meso-scale surface reflectance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
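The abstract above states only the qualitative trend: stretched surfaces flatten and turn shinier, compressed surfaces roughen. A toy sketch of driving anisotropic microfacet roughness from local stretch is shown below; the inverse-linear mapping is an assumption for illustration, not the measured model from the talk.

import numpy as np

def deformed_roughness(alpha_u, alpha_v, stretch_u, stretch_v, floor=1e-3):
    """Toy adjustment of anisotropic microfacet roughness under deformation.

    alpha_u, alpha_v     : rest-state roughness along the two tangent axes
    stretch_u, stretch_v : local stretch ratios along those axes
                           (>1 = stretched, <1 = compressed)
    Stretching lowers roughness, compression raises it; the specific
    1/stretch scaling here is illustrative only.
    """
    return (np.maximum(alpha_u / stretch_u, floor),
            np.maximum(alpha_v / stretch_v, floor))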
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man” Proceedings Article
In: SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques, Vancouver, Canada, 2014.
@inproceedings{jones_creating_2014,
title = {Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man”},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Creating%20a%20life-sized%20automulitscopic%20Morgan%20Spurlock%20for%20CNNs%20%e2%80%9cInside%20Man%e2%80%9d%20(abstract).pdf},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Vancouver, Canada},
abstract = {We present a system for capturing and rendering life-size 3D human subjects on an automultiscopic display. Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
von der Pahlen, Javier; Jimenez, Jorge; Danvoye, Etienne; Debevec, Paul; Fyffe, Graham; Alexander, Oleg
Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters Proceedings Article
In: SIGGRAPH '14 ACM SIGGRAPH 2014 Courses, pp. 1–384, ACM Press, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2962-0.
@inproceedings{von_der_pahlen_digital_2014,
title = {Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters},
author = {Javier von der Pahlen and Jorge Jimenez and Etienne Danvoye and Paul Debevec and Graham Fyffe and Oleg Alexander},
url = {http://ict.usc.edu/pubs/Digial%20Ira%20and%20Beyond%20-%20Creating%20Photoreal%20Real-Time%20Digital%20Characters%20(course%20notes).pdf},
doi = {10.1145/2614028.2615407},
isbn = {978-1-4503-2962-0},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH '14 ACM SIGGRAPH 2014 Courses},
pages = {1–384},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {This course explains a complete process for creating next-generation realtime digital human characters, using the Digital Ira collaboration between USC ICT and Activision as an example, covering highres facial scanning, blendshape rigging, video-based performance capture, animation compression, realtime skin and eye shading, hair, latest results, and future directions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
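The abstract above describes smoothly interpolating tracked viewer heights across the array's field of view. A minimal 1-D sketch of that idea appears below, assuming tracked viewers are summarized by a horizontal angle and an eye height; the paper's per-vertex weighting and distance handling are more involved, and the function and parameter names are assumptions.

import numpy as np

def viewer_height_for_angle(view_angles_deg, tracked_angles_deg, tracked_heights):
    """Interpolate an assumed viewer eye height for each horizontal viewing angle.

    view_angles_deg    : angles at which rays leave the screen toward viewers
    tracked_angles_deg : horizontal angles of the tracked viewers
    tracked_heights    : eye heights of those viewers (same length)
    Outside the tracked range, the nearest viewer's height is held constant.
    """
    order = np.argsort(tracked_angles_deg)
    return np.interp(view_angles_deg,
                     np.asarray(tracked_angles_deg)[order],
                     np.asarray(tracked_heights)[order])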
Artstein, Ron; Smith, Stephen; Traum, David; Alexander, Oleg; Leuski, Anton; Jones, Andrew; Georgila, Kallirroi; Debevec, Paul; Swartout, William; Maio, Heather
Time-offset Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of IUI 2014, pp. 163–168, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2184-6.
@inproceedings{artstein_time-offset_2014,
title = {Time-offset Interaction with a Holocaust Survivor},
author = {Ron Artstein and Stephen Smith and David Traum and Oleg Alexander and Anton Leuski and Andrew Jones and Kallirroi Georgila and Paul Debevec and William Swartout and Heather Maio},
url = {http://ict.usc.edu/pubs/Time-Offset%20Interaction%20with%20a%20Holocaust%20Survivor.pdf},
doi = {10.1145/2557500.2557540},
isbn = {978-1-4503-2184-6},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of IUI 2014},
pages = {163–168},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {Time-offset interaction is a new technology that allows for two-way communication with a person who is not available for conversation in real time: a large set of statements are prepared in advance, and users access these statements through natural conversation that mimics face-to-face interaction. Conversational reactions to user questions are retrieved through a statistical classifier, using technology that is similar to previous interactive systems with synthetic characters; however, all of the retrieved utterances are genuine statements by a real person. Recordings of answers, listening and idle behaviors, and blending techniques are used to create a persistent visual image of the person throughout the interaction. A proof-of-concept has been implemented using the likeness of Pinchas Gutter, a Holocaust survivor, enabling short conversations about his family, his religious views, and resistance. This proof-of-concept has been shown to dozens of people, from school children to Holocaust scholars, with many commenting on the impact of the experience and potential for this kind of interface.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Jones, Andrew; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
An Autostereoscopic Projector Array Optimized for 3D Facial Display Proceedings Article
In: SIGGRAPH 2013 Emerging Technologies, 2013.
@inproceedings{nagano_autostereoscopic_2013,
title = {An Autostereoscopic Projector Array Optimized for 3D Facial Display},
author = {Koki Nagano and Andrew Jones and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Autostereoscopic%20Projector%20Array%20Optimized%20for%203D%20Facial%20Display%20.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Emerging Technologies},
abstract = {Video projectors are rapidly shrinking in size, power consumption, and cost. Such projectors provide unprecedented flexibility to stack, arrange, and aim pixels without the need for moving parts. This dense projector display is optimized in size and resolution to display an autostereoscopic life-sized 3D human face. It utilizes 72 Texas Instruments PICO projectors to illuminate a 30 cm x 30 cm anisotropic screen with a wide 110-degree field of view. The demonstration includes both live scanning of subjects and virtual animated characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Graham, Paul; Nagano, Koki; Busch, Jay; Debevec, Paul
Driving High-Resolution Facial Blendshapes with Video Performance Capture Proceedings Article
In: SIGGRAPH, Anaheim, CA, 2013.
@inproceedings{fyffe_driving_2013,
title = {Driving High-Resolution Facial Blendshapes with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Graham and Koki Nagano and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Blendshapes%20with%20Video%20Performance.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH},
address = {Anaheim, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alexander, Oleg; Busch, Jay; Graham, Paul; Tunwattanapong, Borom; Jones, Andrew; Nagano, Koki; Ichikari, Ryosuke; Debevec, Paul; Fyffe, Graham
Digital Ira: High-Resolution Facial Performance Playback Proceedings Article
In: SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques, Anaheim, CA, 2013.
@inproceedings{alexander_digital_2013,
title = {Digital Ira: High-Resolution Facial Performance Playback},
author = {Oleg Alexander and Jay Busch and Paul Graham and Borom Tunwattanapong and Andrew Jones and Koki Nagano and Ryosuke Ichikari and Paul Debevec and Graham Fyffe},
url = {http://gl.ict.usc.edu/Research/DigitalIra/},
doi = {10.1145/2503385.2503387},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Anaheim, CA},
abstract = {In this collaboration between Activision and USC ICT, we tried to create a real-time, photoreal digital human character which could be seen from any viewpoint, any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we needed this to run in a game-ready production pipeline. To achieve this, we scanned the actor in thirty high-resolution expressions using the USC ICT's new Light Stage X system [Ghosh et al. SIGGRAPH Asia 2011] and chose eight expressions for the real-time performance rendering. To record the performance, we shot multi-view 30fps video of the actor performing improvised lines using the same multi-camera rig. We used a new tool called Vuvuzela to interactively and precisely correspond all expression (u,v)'s to the neutral expression, which was retopologized to an artist mesh. Our new offline animation solver works by creating a performance graph representing dense GPU optical flow between the video frames and the eight expressions. This graph gets pruned by analyzing the correlation between the video frames and the expression scans over twelve facial regions. The algorithm then computes dense optical flow and 3D triangulation yielding per-frame spatially varying blendshape weights approximating the performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tunwattanapong, Borom; Fyffe, Graham; Graham, Paul; Busch, Jay; Yu, Xueming; Ghosh, Abhijeet; Debevec, Paul
Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination Journal Article
In: ACM Transactions on Graphics, vol. 32, no. 4, 2013, ISSN: 0730-0301.
@article{tunwattanapong_acquiring_2013,
title = {Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination},
author = {Borom Tunwattanapong and Graham Fyffe and Paul Graham and Jay Busch and Xueming Yu and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Acquiring%20Re%ef%ac%82ectance%20and%20Shape%20from%20Continuous%20Spherical%20Harmonic%20Illumination.pdf},
doi = {10.1145/2461912.2461944},
issn = {0730-0301},
year = {2013},
date = {2013-07-01},
journal = {ACM Transactions on Graphics},
volume = {32},
number = {4},
abstract = {We present a novel technique for acquiring the geometry and spatially-varying reflectance properties of 3D objects by observing them under continuous spherical harmonic illumination conditions. The technique is general enough to characterize either entirely specular or entirely diffuse materials, or any varying combination across the surface of the object. We employ a novel computational illumination setup consisting of a rotating arc of controllable LEDs which sweep out programmable spheres of incident illumination during 1-second exposures. We illuminate the object with a succession of spherical harmonic illumination conditions, as well as photographed environmental lighting for validation. From the response of the object to the harmonics, we can separate diffuse and specular reflections, estimate world-space diffuse and specular normals, and compute anisotropic roughness parameters for each view of the object. We then use the maps of both diffuse and specular reflectance to form correspondences in a multiview stereo algorithm, which allows even highly specular surfaces to be corresponded across views. The algorithm yields a complete 3D model and a set of merged reflectance maps. We use this technique to digitize the shape and reflectance of a variety of objects difficult to acquire with other techniques and present validation renderings which match well to photographs in similar lighting.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
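The abstract above relies on spherical harmonic illumination conditions. For reference, a sketch of evaluating the first nine real spherical harmonic basis functions (bands 0–2) at a direction is given below, using the standard real-SH normalization constants; it illustrates the kind of basis such a system sweeps out, not the paper's acquisition code.

import numpy as np

def real_sh_basis_bands_0_2(d):
    """First nine real spherical harmonics at unit direction d = (x, y, z),
    ordered l=0; l=1 (m=-1, 0, 1); l=2 (m=-2..2)."""
    x, y, z = d
    return np.array([
        0.282095,                        # l=0
        0.488603 * y,                    # l=1, m=-1
        0.488603 * z,                    # l=1, m= 0
        0.488603 * x,                    # l=1, m= 1
        1.092548 * x * y,                # l=2, m=-2
        1.092548 * y * z,                # l=2, m=-1
        0.315392 * (3.0 * z * z - 1.0),  # l=2, m= 0
        1.092548 * x * z,                # l=2, m= 1
        0.546274 * (x * x - y * y),      # l=2, m= 2
    ])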
Alexander, Oleg; Fyffe, Graham; Busch, Jay; Yu, Xueming; Ichikari, Ryosuke; Jones, Andrew; Debevec, Paul; Jimenez, Jorge; Danvoye, Etienne; Antoniazzi, Bernardo; Eheler, Mike; Kysela, Zbynek; von der Pahlen, Javier
Digital Ira: Creating a Real-Time Photoreal Digital Actor Proceedings Article
In: SIGGRAPH Real Time Live!, Anaheim, CA, 2013, ISBN: 978-1-4503-2342-0.
@inproceedings{alexander_digital_2013-1,
title = {Digital Ira: Creating a Real-Time Photoreal Digital Actor},
author = {Oleg Alexander and Graham Fyffe and Jay Busch and Xueming Yu and Ryosuke Ichikari and Andrew Jones and Paul Debevec and Jorge Jimenez and Etienne Danvoye and Bernardo Antoniazzi and Mike Eheler and Zbynek Kysela and Javier von der Pahlen},
url = {http://dl.acm.org/citation.cfm?doid=2503385.2503387},
doi = {10.1145/2503385.2503387},
isbn = {978-1-4503-2342-0},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH Real Time Live!},
address = {Anaheim, CA},
abstract = {In 2008, the "Digital Emily" project [Alexander et al. 2009] showed how a set of high-resolution facial expressions scanned in a light stage could be rigged into a real-time photoreal digital character and driven with video-based facial animation techniques. However, Digital Emily was rendered offline, involved just the front of the face, and was never seen in a tight closeup. In this collaboration between Activision and USC ICT shown at SIGGRAPH 2013's Real-Time Live venue, we endeavoured to create a real-time, photoreal digital human character which could be seen from any viewpoint, in any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we wanted this to run in a real-time game-ready production pipeline, ultimately achieving 180 frames per second for a full-screen character on a two-year old graphics card.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: EUROGRAPHICS, Girona, Spain, 2013.
@inproceedings{graham_measurement-based_2013,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2013},
date = {2013-05-01},
booktitle = {EUROGRAPHICS},
address = {Girona, Spain},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
The Light Stages and Their Applications to Photoreal Digital Actors Proceedings Article
In: SIGGRAPH Asia, Singapore, 2012.
@inproceedings{debevec_light_2012,
title = {The Light Stages and Their Applications to Photoreal Digital Actors},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/The%20Light%20Stages%20and%20Their%20Applications%20to%20Photoreal%20Digital%20Actors.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {SIGGRAPH Asia},
address = {Singapore},
abstract = {The Light Stage systems built at UC Berkeley and USC ICT have enabled a variety of facial scanning and reflectance measurement techniques that have been explored in several research papers and used in various commercial applications. This short paper presents the evolutionary history of the Light Stage Systems and some of the techniques and applications they have enabled.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Technical Report
University of Southern California Institute for Creative Technologies, no. ICT TR 01 2012, 2012.
@techreport{graham_measurement-based_2012,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2012.pdf},
year = {2012},
date = {2012-11-01},
number = {ICT TR 01 2012},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a technique for generating microstructure-level facial geometry by augmenting a mesostructure-level facial scan with detail synthesized from a set of exemplar skin patches scanned at much higher resolution. We use constrained texture synthesis based on image analogies to increase the resolution of the facial scan in a way that is consistent with the scanned mesostructure. We digitize the exemplar patches with a polarization-based computational illumination technique which considers specular reflection and single scattering. The recorded microstructure patches can be used to synthesize full-facial microstructure detail for either the same subject or to a different subject. We show that the technique allows for greater realism in facial renderings including more accurate reproduction of skin’s specular roughness and anisotropic reflection effects.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: ACM SIGGRAPH 2012 Talks (SIGGRAPH '12), Los Angeles, CA, 2012.
@inproceedings{graham_measurement-based_2012-1,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/A%20Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks (SIGGRAPH '12)},
address = {Los Angeles, CA},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Yufeng; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), 2012.
@inproceedings{zhu_estimating_2012,
title = {Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination},
author = {Yufeng Zhu and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Estimating%20Diffusion%20Parameters%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
abstract = {Accurately modeling and reproducing the appearance of real-world materials is crucial for the production of photoreal imagery of digital scenes and subjects. The appearance of many common materials is the result of subsurface light transport that gives rise to the characteristic “soft” appearance and the unique coloring of such materials. Jensen et al. [2001] introduced the dipole-diffusion approximation to efficiently model isotropic subsurface light transport. The scattering parameters needed to drive the dipole-diffusion approximation are typically estimated by illuminating a homogeneous surface patch with a collimated beam of light, or in the case of spatially varying translucent materials with a dense set of structured light patterns. A disadvantage of most existing techniques is that acquisition time is traded off with spatial density of the scattering parameters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
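The abstract above builds on the dipole-diffusion approximation of Jensen et al. [2001]. For reference, the standard dipole diffusion profile whose parameters such methods estimate (quoted from that prior work, not derived in this talk) is

R_d(r) = \frac{\alpha'}{4\pi}\left[ z_r \left(1+\sigma_{tr} d_r\right) \frac{e^{-\sigma_{tr} d_r}}{d_r^{3}} + z_v \left(1+\sigma_{tr} d_v\right) \frac{e^{-\sigma_{tr} d_v}}{d_v^{3}} \right],
\qquad d_r = \sqrt{r^2 + z_r^2},\quad d_v = \sqrt{r^2 + z_v^2},

with z_r = 1/\sigma_t', z_v = z_r\left(1 + \tfrac{4A}{3}\right), \sigma_{tr} = \sqrt{3\sigma_a\sigma_t'}, and \alpha' = \sigma_s'/\sigma_t'. The absorption and reduced scattering coefficients \sigma_a and \sigma_s' are the per-pixel quantities the acquisition discussed above seeks to recover.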
Debevec, Paul; Graham, Paul; Busch, Jay; Bolas, Mark
A Cell Phone Based Platform for Facial Performance Capture Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
@inproceedings{debevec_cell_2012,
title = {A Cell Phone Based Platform for Facial Performance Capture},
author = {Paul Debevec and Paul Graham and Jay Busch and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Cell%20Phone%20Based%20Platform%20for%20Facial%20Performance%20Capture.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
abstract = {We demonstrate a novel light probe which can estimate the full dynamic range of a scene with multiple bright light sources. It places diffuse strips between mirrored spherical quadrants, effectively co-locating diffuse and mirrored probes to record the full dynamic range of illumination in a single exposure. From this image, we estimate the intensity of multiple saturated light sources by solving a linear system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
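The abstract above mentions estimating the intensities of multiple saturated light sources by solving a linear system over the unsaturated diffuse-strip pixels. A minimal least-squares sketch of that step is shown below; the matrix construction from the strip geometry is assumed and not detailed in the abstract.

import numpy as np

def estimate_source_intensities(A, b):
    """Recover unknown light-source intensities from unsaturated diffuse observations.

    A : (P, S) matrix; A[p, s] is the diffuse response of pixel p to source s
        at unit intensity (assumed known from the probe geometry)
    b : (P,) observed, unsaturated pixel values
    """
    x, *_ = np.linalg.lstsq(A, b, rcond=None)
    return np.clip(x, 0.0, None)   # physical intensities cannot be negative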
Filter
2001
Waese, Jamie; Debevec, Paul
A Real Time High Dynamic Range Light Probe Proceedings Article
In: SIGGRAPH Technical Sketches, 2001.
@inproceedings{waese_real_2001,
title = {A Real Time High Dynamic Range Light Probe},
author = {Jamie Waese and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Real%20Time%20High%20Dynamic%20Range%20Light%20Probe.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Tchou, Chris; Debevec, Paul
Light Stage 2.0 Proceedings Article
In: SIGGRAPH Technical Sketches, pp. 217, 2001.
@inproceedings{hawkins_light_2001,
title = {Light Stage 2.0},
author = {Tim Hawkins and Jonathan Cohen and Chris Tchou and Paul Debevec},
url = {http://ict.usc.edu/pubs/Light%20Stage%202.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
pages = {217},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Debevec, Paul
A Photometric Approach to Digitizing Cultural Artifacts Proceedings Article
In: Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage, Glyfada, Greece, 2001.
@inproceedings{hawkins_photometric_2001,
title = {A Photometric Approach to Digitizing Cultural Artifacts},
author = {Tim Hawkins and Jonathan Cohen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Photometric%20Approach%20to%20Digitizing%20Cultural%20Artifacts.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
address = {Glyfada, Greece},
abstract = {In this paper we present a photometry-based approach to the digital documentation of cultural artifacts. Rather than representing an artifact as a geometric model with spatially varying reflectance properties, we instead propose directly representing the artifact in terms of its reflectance field - the manner in which it transforms light into images. The principal device employed in our technique is a computer-controlled lighting apparatus which quickly illuminates an artifact from an exhaustive set of incident illumination directions and a set of digital video cameras which record the artifact's appearance under these forms of illumination. From this database of recorded images, we compute linear combinations of the captured images to synthetically illuminate the object under arbitrary forms of complex incident illumination, correctly capturing the effects of specular reflection, subsurface scattering, self-shadowing, mutual illumination, and complex BRDF's often present in cultural artifacts. We also describe a computer application that allows users to realistically and interactively relight digitized artifacts.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
2000
Debevec, Paul; Hawkins, Tim; Tchou, Chris; Duiker, Haarm-Pieter; Sarokin, Westley
Acquiring the Reflectance Field of a Human Face Proceedings Article
In: SIGGRAPH, New Orleans, LA, 2000.
@inproceedings{debevec_acquiring_2000,
title = {Acquiring the Reflectance Field of a Human Face},
author = {Paul Debevec and Tim Hawkins and Chris Tchou and Haarm-Pieter Duiker and Westley Sarokin},
url = {http://ict.usc.edu/pubs/Acquiring%20the%20Re%EF%AC%82ectance%20Field%20of%20a%20Human%20Face.pdf},
year = {2000},
date = {2000-07-01},
booktitle = {SIGGRAPH},
address = {New Orleans, LA},
abstract = {We present a method to acquire the reflectance field of a human face and use these measurements to render the face under arbitrary changes in lighting and viewpoint. We first acquire images of the face from a small set of viewpoints under a dense sampling of incident illumination directions using a light stage. We then construct a reflectance function image for each observed image pixel from its values over the space of illumination directions. From the reflectance functions, we can directly generate images of the face from the original viewpoints in any form of sampled or computed illumination. To change the viewpoint, we use a model of skin reflectance to estimate the appearance of the reflectance functions for novel viewpoints. We demonstrate the technique with synthetic renderings of a person's face under novel illumination and viewpoints.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
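The abstract above explains that any target illumination can be reproduced as a linear combination of the densely sampled light-stage images. A minimal relighting sketch of that idea follows; array shapes and names are assumptions.

import numpy as np

def relight(basis_images, weights):
    """Relight a reflectance field: weighted sum of images captured under each
    incident lighting direction.

    basis_images : (L, H, W, 3) images, one per light-stage direction
    weights      : (L, 3) per-direction RGB weights sampled from the target
                   illumination (e.g., an environment map)
    """
    return np.einsum('lhwc,lc->hwc', basis_images, weights)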
2024
Chen, Haiwei; Zhao, Yajie
Don't Look into the Dark: Latent Codes for Pluralistic Image Inpainting Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7591–7600, 2024.
@inproceedings{chen_dont_nodate,
title = {Don't Look into the Dark: Latent Codes for Pluralistic Image Inpainting},
author = {Haiwei Chen and Yajie Zhao},
url = {https://openaccess.thecvf.com/content/CVPR2024/html/Chen_Dont_Look_into_the_Dark_Latent_Codes_for_Pluralistic_Image_CVPR_2024_paper.html},
year = {2024},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {7591–7600},
abstract = {We present a method for large-mask pluralistic image inpainting based on the generative framework of discrete latent codes. Our method learns latent priors, discretized as tokens, by only performing computations at the visible locations of the image. This is realized by a restrictive partial encoder that predicts the token label for each visible block, a bidirectional transformer that infers the missing labels by only looking at these tokens, and a dedicated synthesis network that couples the tokens with the partial image priors to generate a coherent and pluralistic complete image even under extreme mask settings. Experiments on public benchmarks validate our design choices, as the proposed method outperforms strong baselines in both visual quality and diversity metrics.},
keywords = {DTIC, Graphics, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
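The abstract above describes a bidirectional transformer that infers the token labels of masked blocks from the visible ones. A schematic sketch of that inference step follows; the transformer callable, its signature, and the codebook size are hypothetical stand-ins, and sampling from the logits (rather than taking the argmax) is what would yield pluralistic completions.

import numpy as np

def fill_missing_labels(visible_labels, visible_positions, num_blocks, transformer):
    """Infer token labels for masked blocks from the visible ones (sketch).

    visible_labels    : (V,) integer token labels from the partial encoder
    visible_positions : (V,) block indices of those labels in the full grid
    num_blocks        : total number of blocks in the image grid
    transformer       : hypothetical bidirectional model returning
                        (num_blocks, K) logits over a K-entry codebook
    """
    logits = np.asarray(transformer(visible_labels, visible_positions))
    labels = logits.argmax(axis=-1)            # or sample for pluralistic results
    labels[visible_positions] = visible_labels  # keep the known blocks fixed
    return labels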