Publications
Olszewski, Kyle; Lim, Joseph J.; Saito, Shunsuke; Li, Hao
High-fidelity facial and speech animation for VR HMDs Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 6, pp. 1–14, 2016, ISSN: 0730-0301.
@article{olszewski_high-fidelity_2016,
title = {High-fidelity facial and speech animation for VR HMDs},
author = {Kyle Olszewski and Joseph J. Lim and Shunsuke Saito and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=2980179.2980252},
doi = {10.1145/2980179.2980252},
issn = {0730-0301},
year = {2016},
date = {2016-11-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {6},
pages = {1–14},
abstract = {Several significant challenges currently prohibit expressive interaction in virtual reality (VR). The occlusion introduced by modern head-mounted displays (HMDs) makes most existing techniques for facial tracking intractable in this scenario. Furthermore, even state-of-the-art techniques used for real-time facial tracking in less constrained environments fail to capture subtle details of the user’s facial expressions that are essential for compelling speech animation. We introduce a novel system for HMD users to control a digital avatar in real-time while producing plausible speech animation and emotional expressions. Using a monocular camera attached to the front of an HMD, we record video sequences from multiple subjects performing a variety of facial expressions and speaking several phonetically-balanced sentences. These images are used with artist-generated animation data corresponding to these sequences to train a convolutional neural network (CNN) to regress images of a user’s mouth region to the parameters that control a digital avatar. To make training this system more tractable, we make use of audio-based alignment techniques to map images of multiple users making the same utterance to the corresponding animation parameters. We demonstrate that our regression technique is also feasible for tracking the expressions around the user’s eye region, including the eyebrows, with an infrared (IR) camera within the HMD, thereby enabling full facial tracking. This system requires no user-specific calibration, makes use of easily obtainable consumer hardware, and produces high-quality animations of both speech and emotional expressions. Finally, we demonstrate the quality of our system on a variety of subjects and evaluate its performance against state-of-the-art real-time facial tracking techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Efficient Multispectral Reflectance Function Capture for Image-Based Relighting Proceedings Article
In: Proceedings of the Color and Imaging Conference, pp. 47–58, Society for Imaging Science and Technology, San Diego, CA, 2016.
@inproceedings{legendre_efficient_2016,
title = {Efficient Multispectral Reflectance Function Capture for Image-Based Relighting},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://www.ingentaconnect.com/contentone/ist/cic/2016/00002016/00000001/art00008},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the Color and Imaging Conference},
pages = {47–58},
publisher = {Society for Imaging Science and Technology},
address = {San Diego, CA},
abstract = {Image-based relighting (IBRL) renders the appearance of a subject in a novel lighting environment as a linear combination of the images of its reflectance field, the appearance of the subject lit by each incident lighting direction. Traditionally, a tristimulus color camera records the reflectance field as the subject is sequentially illuminated by broad-spectrum white light sources from each direction. Using a multispectral LED sphere and either a tristimulus (RGB) or monochrome camera, we photograph a still life scene to acquire its multispectral reflectance field – its appearance for every lighting direction for multiple incident illumination spectra. For the tristimulus camera, we demonstrate improved color rendition for IBRL when using the multispectral reflectance field, producing a closer match to the scene's actual appearance in a real-world illumination environment. For the monochrome camera, we also show close visual matches. We additionally propose an efficient method for acquiring such multispectral reflectance fields, augmenting the traditional broad-spectrum lighting basis capture with only a few additional images equal to the desired number of spectral channels. In these additional images, we illuminate the subject by a complete sphere of each available narrow-band LED light source, in our case: red, amber, green, cyan, and blue. From the full-sphere illumination images, we promote the white-light reflectance functions for every direction to multispectral, effectively hallucinating the appearance of the subject under each LED spectrum for each lighting direction. We also use polarization imaging to separate the diffuse and specular components of the reflectance functions, spectrally promoting these components according to different models. We validate that the approximated multispectral reflectance functions closely match those generated by a fully multispectral omnidirectional lighting basis, suggesting a rapid multispectral reflectance field capture method which could be applied for live subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
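The linear-combination step at the heart of IBRL, as described in the abstract above, can be written in a few lines. The following is a minimal NumPy sketch, not the authors' code; the array layout and the idea of sampling per-direction weights from an HDR panorama are illustrative assumptions.

import numpy as np

def relight(reflectance_field, env_weights):
    # reflectance_field: (L, H, W, 3) array holding one RGB image of the
    #   subject per incident lighting direction.
    # env_weights: (L, 3) array of per-direction RGB intensities sampled
    #   from the novel lighting environment.
    # Returns the (H, W, 3) relit image: a weighted sum over directions.
    return np.einsum('lhwc,lc->hwc', reflectance_field, env_weights)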
Wang, Ruizhe; Wei, Lingyu; Vouga, Etienne; Huang, Qixing; Ceylan, Duygu; Medioni, Gerard; Li, Hao
Capturing Dynamic Textured Surfaces of Moving Targets Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision (ECCV 2016, Spotlight Presentation), Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46477-0 978-3-319-46478-7.
@inproceedings{wang_capturing_2016,
title = {Capturing Dynamic Textured Surfaces of Moving Targets},
author = {Ruizhe Wang and Lingyu Wei and Etienne Vouga and Qixing Huang and Duygu Ceylan and Gerard Medioni and Hao Li},
url = {https://link.springer.com/chapter/10.1007/978-3-319-46478-7_17},
isbn = {978-3-319-46477-0 978-3-319-46478-7},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 14th European Conference on Computer Vision (ECCV 2016, Spotlight Presentation)},
publisher = {Springer International Publishing},
address = {Amsterdam, The Netherlands},
abstract = {We present an end-to-end system for reconstructing complete watertight and textured models of moving subjects such as clothed humans and animals, using only three or four handheld sensors. The heart of our framework is a new pairwise registration algorithm that minimizes, using a particle swarm strategy, an alignment error metric based on mutual visibility and occlusion. We show that this algorithm reliably registers partial scans with as little as 15% overlap without requiring any initial correspondences, and outperforms alternative global registration algorithms. This registration algorithm allows us to reconstruct moving subjects from free-viewpoint video produced by consumer-grade sensors, without extensive sensor calibration, constrained capture volume, expensive arrays of cameras, or templates of the subject geometry.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Li, Tianye; Li, Hao
Real-Time Facial Segmentation and Performance Capture from RGB Input Proceedings Article
In: Proceedings of the 14th European Conference on Computer Vision (ECCV 2016), pp. 244–261, Springer International Publishing, Amsterdam, The Netherlands, 2016, ISBN: 978-3-319-46483-1 978-3-319-46484-8.
@inproceedings{saito_real-time_2016,
title = {Real-Time Facial Segmentation and Performance Capture from RGB Input},
author = {Shunsuke Saito and Tianye Li and Hao Li},
url = {https://link.springer.com/chapter/10.1007/978-3-319-46484-8_15},
isbn = {978-3-319-46483-1 978-3-319-46484-8},
year = {2016},
date = {2016-10-01},
booktitle = {Proceedings of the 14th European Conference on Computer Vision (ECCV 2016)},
pages = {244–261},
publisher = {Springer International Publishing},
address = {Amsterdam, The Netherlands},
abstract = {We introduce the concept of unconstrained real-time 3D facial performance capture through explicit semantic segmentation in the RGB input. To ensure robustness, cutting edge supervised learning approaches rely on large training datasets of face images captured in the wild. While impressive tracking quality has been demonstrated for faces that are largely visible, any occlusion due to hair, accessories, or hand-to-face gestures would result in significant visual artifacts and loss of tracking accuracy. The modeling of occlusions has been mostly avoided due to its immense space of appearance variability. To address this curse of high dimensionality, we perform tracking in unconstrained images assuming non-face regions can be fully masked out. Along with recent breakthroughs in deep learning, we demonstrate that pixel-level facial segmentation is possible in real-time by repurposing convolutional neural networks designed originally for general semantic segmentation. We develop an efficient architecture based on a two-stream deconvolution network with complementary characteristics, and introduce carefully designed training samples and data augmentation strategies for improved segmentation accuracy and robustness. We adopt a state-of-the-art regression-based facial tracking framework with segmented face images as training, and demonstrate accurate and uninterrupted facial performance capture in the presence of extreme occlusion and even side views. Furthermore, the resulting segmentation can be directly used to composite partial 3D face models on the input images and enable seamless facial manipulation tasks, such as virtual make-up or face replacement.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18–26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Liu, Dai; Busch, Jay; Jones, Andrew; Pattanaik, Sumanta; Debevec, Paul
Practical Multispectral Lighting Reproduction Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 4, pp. 1–11, 2016, ISSN: 0730-0301.
@article{legendre_practical_2016,
title = {Practical Multispectral Lighting Reproduction},
author = {Chloe LeGendre and Xueming Yu and Dai Liu and Jay Busch and Andrew Jones and Sumanta Pattanaik and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2925934},
doi = {10.1145/2897824.2925934},
issn = {0730-0301},
year = {2016},
date = {2016-07-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {4},
pages = {1–11},
abstract = {We present a practical framework for reproducing omnidirectional incident illumination conditions with complex spectra using a light stage with multispectral LED lights. For lighting acquisition, we augment standard RGB panoramic photography with one or more observations of a color chart with numerous reflectance spectra. We then solve for how to drive the multispectral light sources so that they best reproduce the appearance of the color charts in the original lighting. Even when solving for non-negative intensities, we show that accurate lighting reproduction is achievable using just four or six distinct LED spectra for a wide range of incident illumination spectra. A significant benefit of our approach is that it does not require the use of specialized equipment (other than the light stage) such as monochromators, spectroradiometers, or explicit knowledge of the LED power spectra, camera spectral response functions, or color chart reflectance spectra. We describe two simple devices for multispectral lighting capture, one for slow measurements of detailed angular spectral detail, and one for fast measurements with coarse angular detail. We validate the approach by realistically compositing real subjects into acquired lighting environments, showing accurate matches to how the subject would actually look within the environments, even for those including complex multispectral illumination. We also demonstrate dynamic lighting capture and playback using the technique.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
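The core solve described above, driving multispectral LEDs so they best reproduce the appearance of a color chart under the original illumination, maps naturally to non-negative least squares. Below is a hedged sketch of that step only; the data layout is an assumption for illustration, and the paper's full pipeline also handles per-direction lighting and dynamic playback.

import numpy as np
from scipy.optimize import nnls

def solve_led_intensities(chart_under_leds, chart_in_scene):
    # chart_under_leds: (M, K) matrix; column k stacks the M observed
    #   chart-patch values when only LED spectrum k is turned on.
    # chart_in_scene: (M,) vector of the same patch values observed
    #   under the illumination being reproduced.
    # Returns K non-negative drive levels, one per LED spectrum.
    weights, residual = nnls(chart_under_leds, chart_in_scene)
    return weights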
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: Proceedings of ACM SIGGRAPH 2016, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{legendre_optimal_2016,
title = {Optimal LED selection for multispectral lighting reproduction},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2945150},
doi = {10.1145/2945078.2945150},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of ACM SIGGRAPH 2016},
publisher = {ACM},
address = {New York, NY},
abstract = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
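Choosing the best five of eleven available LED spectra, as in the abstract above, is small enough to solve by exhaustive search over the 462 possible subsets. This sketch illustrates one plausible formulation; the error metric and data layout are stand-ins, not the paper's exact datasets or criterion.

import itertools
import numpy as np
from scipy.optimize import nnls

def best_led_subset(led_basis, targets, k=5):
    # led_basis: (M, N) matrix of observations produced by each of N LEDs.
    # targets: (M, T) matrix of T target observations to reproduce.
    # Returns the k-LED subset minimizing total non-negative fit error.
    best, best_err = None, np.inf
    for subset in itertools.combinations(range(led_basis.shape[1]), k):
        A = led_basis[:, subset]
        err = sum(nnls(A, targets[:, t])[1] ** 2
                  for t in range(targets.shape[1]))
        if err < best_err:
            best, best_err = subset, err
    return best, best_err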
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121–129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
doi = {10.1111/cgf.12837},
issn = {1467-8659},
year = {2016},
date = {2016-05-01},
journal = {Computer Graphics Forum},
abstract = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Traum, David; Jones, Andrew; Hays, Kia; Maio, Heather; Alexander, Oleg; Artstein, Ron; Debevec, Paul; Gainer, Alesia; Georgila, Kallirroi; Haase, Kathleen; Jungblut, Karen; Leuski, Anton; Smith, Stephen; Swartout, William
New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling Book Section
In: Interactive Storytelling, vol. 9445, pp. 269–281, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27035-7 978-3-319-27036-4.
@incollection{traum_new_2015,
title = {New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling},
author = {David Traum and Andrew Jones and Kia Hays and Heather Maio and Oleg Alexander and Ron Artstein and Paul Debevec and Alesia Gainer and Kallirroi Georgila and Kathleen Haase and Karen Jungblut and Anton Leuski and Stephen Smith and William Swartout},
url = {http://link.springer.com/10.1007/978-3-319-27036-4_26},
isbn = {978-3-319-27035-7 978-3-319-27036-4},
year = {2015},
date = {2015-12-01},
booktitle = {Interactive Storytelling},
volume = {9445},
pages = {269–281},
publisher = {Springer International Publishing},
address = {Copenhagen, Denmark},
abstract = {We describe a digital system that allows people to have an interactive conversation with a human storyteller (a Holocaust survivor) who has recorded a number of dialogue contributions, including many compelling narratives of his experiences and thoughts. The goal is to preserve as much as possible of the experience of face-to-face interaction. The survivor's stories, answers to common questions, and testimony are recorded in high fidelity, and then delivered interactively to an audience as responses to spoken questions. People can ask questions and receive answers on a broad range of topics including the survivor's experiences before, after and during the war, his attitudes and philosophy. Evaluation results show that most user questions can be addressed by the system, and that audiences are highly engaged with the resulting interaction.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015 Emerging Technologies, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015 Emerging Technologies},
pages = {1–1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high angular density over a wide field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore "time-offset" interactions with recorded 3D human subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Metallo, Adam; Rossi, Vincent; Blundell, Jonathan; Waibel, Günter; Graham, Paul; Fyffe, Graham; Yu, Xueming; Debevec, Paul
Scanning and printing a 3D portrait of President Barack Obama Proceedings Article
In: SIGGRAPH 2015: Studio, pp. 19, ACM, Los Angeles, CA, 2015.
@inproceedings{metallo_scanning_2015,
title = {Scanning and printing a 3D portrait of President Barack Obama},
author = {Adam Metallo and Vincent Rossi and Jonathan Blundell and Günter Waibel and Paul Graham and Graham Fyffe and Xueming Yu and Paul Debevec},
url = {http://ict.usc.edu/pubs/Scanning%20and%20Printing%20a%203D%20Portrait%20of%20President%20Barack%20Obama.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015: Studio},
pages = {19},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {On June 9th, 2014, we traveled to the State Dining Room of The White House to create a 3D Portrait of President Barack Obama using state-of-the-art 3D scanning and printing technology, producing the modern equivalent of the plaster life masks of President Lincoln from the 1860s.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Xueming; Wang, Shanhe; Busch, Jay; Phan, Thai; McSheery, Tracy; Bolas, Mark; Debevec, Paul
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Posters, pp. 94, ACM, Los Angeles, CA, 2015.
@inproceedings{yu_virtual_2015,
title = {Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking},
author = {Xueming Yu and Shanhe Wang and Jay Busch and Thai Phan and Tracy McSheery and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Posters},
pages = {94},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focused, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham
Skin Stretch - Simulating Dynamic Skin Microgeometry Proceedings Article
In: ACM SIGGRAPH 2015 Computer Animation Festival, pp. 133, Los Angeles, CA, 2015.
@inproceedings{nagano_skin_2015,
title = {Skin Stretch - Simulating Dynamic Skin Microgeometry},
author = {Koki Nagano and Graham Fyffe},
url = {http://ict.usc.edu/pubs/Skin%20Stretch%20-%20Simulating%20Dynamic%20Skin%20Microgeometry.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
pages = {133},
address = {Los Angeles, CA},
abstract = {This demonstration of the effects of skin microstructure deformation on high-resolution dynamic facial rendering features the state of the art in skin microstructure simulation, facial scanning, and rendering. Facial animations made with the technique show more realistic and expressive skin during facial expressions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Fyffe, Graham; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1–1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbič, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 0730-0301.
@article{nagano_skin_2015-1,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbič and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {0730-0301},
year = {2015},
date = {2015-07-01},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1–10},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
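The abstract's central idea, blurring the neutral microstructure displacement map along stretched directions and sharpening it along compressed ones, can be sketched with separable Gaussian filtering. This is a simplified, axis-aligned stand-in for the paper's measured, tabulated filters; the scale factor k is an assumed free parameter.

import numpy as np
from scipy.ndimage import gaussian_filter

def filter_axis(disp, stretch, axis, k=1.0):
    # Filter strength grows with how far the stretch ratio departs from 1.
    sigma = [0.0, 0.0]
    sigma[axis] = k * abs(stretch - 1.0)
    blurred = gaussian_filter(disp, sigma=tuple(sigma))
    if stretch >= 1.0:
        return blurred                 # stretching smooths the skin
    return disp + (disp - blurred)     # compression roughens: unsharp mask

def deform_microstructure(disp, stretch_u, stretch_v, k=1.0):
    # disp: 2D neutral displacement map; stretch_u/v: per-axis ratios.
    return filter_axis(filter_axis(disp, stretch_u, 1, k), stretch_v, 0, k)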
Saito, Shunsuke; Huang, Zeng; Natsume, Ryota; Morishima, Shigeo; Kanazawa, Angjoo; Li, Hao
PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization Journal Article
In: arXiv:1905.05172 [cs], 2019.
@article{saito_pifu_2015,
title = {PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization},
author = {Shunsuke Saito and Zeng Huang and Ryota Natsume and Shigeo Morishima and Angjoo Kanazawa and Hao Li},
url = {http://arxiv.org/abs/1905.05172},
year = {2019},
date = {2019-05-01},
journal = {arXiv:1905.05172 [cs]},
abstract = {We introduce Pixel-aligned Implicit Function (PIFu), a highly effective implicit representation that locally aligns pixels of 2D images with the global context of their corresponding 3D object. Using PIFu, we propose an end-to-end deep learning method for digitizing highly detailed clothed humans that can infer both 3D surface and texture from a single image, and optionally, multiple input images. Highly intricate shapes, such as hairstyles, clothing, as well as their variations and deformations can be digitized in a unified way. Compared to existing representations used for 3D deep learning, PIFu can produce high-resolution surfaces including largely unseen regions such as the back of a person. In particular, it is memory efficient unlike the voxel representation, can handle arbitrary topology, and the resulting surface is spatially aligned with the input image. Furthermore, while previous techniques are designed to process either a single image or multiple views, PIFu extends naturally to an arbitrary number of views. We demonstrate high-resolution and robust reconstructions on real world images from the DeepFashion dataset, which contains a variety of challenging clothing types. Our method achieves state-of-the-art performance on a public benchmark and outperforms the prior work for clothed human digitization from a single image.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
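Schematically, the pixel-aligned implicit function evaluates an MLP on an image feature sampled at the projection of each 3D query point, together with its depth. The sketch below illustrates that query step only; camera and mlp are assumed callables, and the layout is illustrative rather than the released PIFu code.

import numpy as np

def bilinear_sample(fmap, xy):
    # fmap: (C, H, W) feature map; xy: (N, 2) pixel coordinates.
    C, H, W = fmap.shape
    x = np.clip(xy[:, 0], 0, W - 1.001)
    y = np.clip(xy[:, 1], 0, H - 1.001)
    x0, y0 = x.astype(int), y.astype(int)
    fx, fy = x - x0, y - y0
    out = (fmap[:, y0, x0] * (1 - fx) * (1 - fy)
           + fmap[:, y0, x0 + 1] * fx * (1 - fy)
           + fmap[:, y0 + 1, x0] * (1 - fx) * fy
           + fmap[:, y0 + 1, x0 + 1] * fx * fy)
    return out.T  # (N, C) pixel-aligned features

def pifu_query(points, fmap, camera, mlp):
    # points: (N, 3) query locations; camera projects them to (N, 2)
    # pixel coordinates plus (N,) depths; mlp maps [feature, depth]
    # to an occupancy value in [0, 1] per point.
    xy, z = camera(points)
    feats = bilinear_sample(fmap, xy)
    return mlp(np.concatenate([feats, z[:, None]], axis=1))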
Fyffe, Graham; Debevec, Paul
Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination Proceedings Article
In: Proceedings of ICCP 2015, pp. 1–10, IEEE, Houston, Texas, 2015.
@inproceedings{fyffe_single-shot_2015,
title = {Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination},
author = {Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Single-Shot%20Reflectance%20Measurement%20from%20Polarized%20Color%20Gradient%20Illumination.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICCP 2015},
pages = {1–10},
publisher = {IEEE},
address = {Houston, Texas},
abstract = {We present a method for acquiring the per-pixel diffuse albedo, specular albedo, and surface normal maps of a subject at a single instant in time. The method is single shot, requiring no optical flow, and per-pixel, making no assumptions regarding albedo statistics or surface connectivity. We photograph the subject inside a spherical illumination device emitting a static lighting pattern of vertically polarized RGB color gradients aligned with the XYZ axes, and horizontally polarized RGB color gradients inversely aligned with the XYZ axes. We capture simultaneous photographs using one of two possible setups: a single view setup using a coaxially aligned camera pair with a polarizing beam splitter, and a multi-view stereo setup with different orientations of linear polarizing filters placed on the cameras, enabling high-quality geometry reconstruction. From this lighting we derive full-color diffuse albedo, single-channel specular albedo suitable for dielectric materials, and polarization-preserving surface normals which are free of corruption from subsurface scattering. We provide simple formulae to estimate the diffuse albedo, specular albedo, and surface normal maps in the single-view and multi-view cases and show error bounds which are small for many common subjects including faces.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Building a Life-Size Automultiscopic Display Using Consumer Hardware Proceedings Article
In: Proceedings of GPU Technology Conference, San Jose, CA, 2015.
@inproceedings{jones_building_2015,
title = {Building a Life-Size Automultiscopic Display Using Consumer Hardware},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Building%20a%20Life-Size%20Automultiscopic%20Display%20Using%20Consumer%20Hardware.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of GPU Technology Conference},
address = {San Jose, CA},
abstract = {Automultiscopic displays allow multiple users to experience 3D content without the hassle of special glasses or head gear. Such displays generate many simultaneous images with high angular density, so that each eye perceives a distinct and different view. This presents a unique challenge for content acquisition and rendering. In this talk, we explain how to build an automultiscopic display using off-the-shelf projectors, video-splitters, and graphics cards. We also present a GPU-based algorithm for rendering a large number of views from a sparse array of video cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134–134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2010
Ghosh, Abhijeet; Heidrich, Wolfgang; Achutha, Shruthi; O'Toole, Matthew
A Basis Illumination Approach to BRDF Measurement Journal Article
In: International Journal of Computer Vision, vol. 90, no. 2, pp. 183–197, 2010.
@article{ghosh_basis_2010,
title = {A Basis Illumination Approach to BRDF Measurement},
author = {Abhijeet Ghosh and Wolfgang Heidrich and Shruthi Achutha and Matthew O'Toole},
url = {http://ict.usc.edu/pubs/A%20Basis%20Illumination%20Approach%20to%20BRDF%20Measurement.pdf},
doi = {10.1007/s11263-008-0151-7},
year = {2010},
date = {2010-01-01},
journal = {International Journal of Computer Vision},
volume = {90},
number = {2},
pages = {183–197},
abstract = {Realistic descriptions of surface reflectance have long been a topic of interest in both computer vision and computer graphics research. In this paper, we describe a novel high speed approach for the acquisition of bidirectional reflectance distribution functions (BRDFs). We develop a new theory for directly measuring BRDFs in a basis representation by projecting incident light as a sequence of basis functions from a spherical zone of directions. We derive an orthonormal basis over spherical zones that is ideally suited for this task. BRDF values outside the zonal directions are extrapolated by re-projecting the zonal measurements into a spherical harmonics basis, or by fitting analytical reflection models to the data. For specular materials, we experiment with alternative basis acquisition approaches such as compressive sensing with a random subset of the higher order orthonormal zonal basis functions, as well as measuring the response to basis defined by an analytical model as a way of optically fitting the BRDF to such a representation. We verify this approach with a compact optical setup that requires no moving parts and only a small number of image measurements. Using this approach, a BRDF can be measured in just a few minutes.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Combining Spherical Harmonics and Point-Source Illumination for Efficient Image-Based Relighting Proceedings Article
In: SIGGRAPH 2010, 2010.
@inproceedings{tunwattanapong_combining_2010,
title = {Combining Spherical Harmonics and Point-Source Illumination for Efficient Image-Based Relighting},
author = {Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Combining%20Spherical%20Harmonics%20and%20Point-Source%20Illumination%20for%20Efficient%20Image-Based%20Relighting.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {SIGGRAPH 2010},
abstract = {The traditional image-based relighting technique requires capturing a dense set of lighting directions surrounding the object and uses the linearity of light transport together with the illumination data of the target environment to relight an object [Debevec et al. 2000]. However, this can be a very data-intensive process because such datasets typically involve photographing hundreds of lighting directions. It is also difficult to modify or edit the data in post-production environments because the data is high dimensional. Adjustments have to be made in several dimensions in order to add artistic effects to the result. Difficulty in the acquisition process is also one of the main problems. The capturing process typically lasts long enough to only be suitable for static objects. In this poster, we present a relighting technique which greatly reduces the number of images required for relighting, and still generates realistic results. We combine spherical harmonics with point lights to achieve efficient image-based relighting. Spherical harmonics can efficiently capture smooth low-frequency illumination [Ramamoorthi and Hanrahan 2001] while point lights capture high-frequency directional illumination. Combining both techniques, we create relighting results which have both low- and high-frequency illumination data. This technique also benefits the acquisition process by reducing the number of required photographs, which results in shorter capture time. In addition, fewer dimensions of the data can potentially simplify modification or editing of reflectance data.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
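The decomposition described in this abstract can be summarized compactly. In the notation below (mine, not the poster's), the environment L is approximated by a low-order spherical-harmonic expansion plus a small set of point lights:

\[
  L(\omega) \;\approx\; \sum_{l=0}^{n}\sum_{m=-l}^{l} c_{lm}\, Y_{lm}(\omega)
  \;+\; \sum_{j=1}^{J} w_j\, \delta(\omega - \omega_j),
\]

so a relit image is the corresponding combination of the subject's photographed responses to each SH lighting condition and to each point-light direction, requiring only (n+1)^2 SH images plus J point-light images rather than hundreds of lighting directions.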
2009
Fyffe, Graham; Wilson, Cyrus A.; Debevec, Paul
Cosine Lobe Based Relighting from Gradient Illumination Photographs Proceedings Article
In: Conference on Visual Media Production, London, UK, 2009.
@inproceedings{fyffe_cosine_2009,
title = {Cosine Lobe Based Relighting from Gradient Illumination Photographs},
author = {Graham Fyffe and Cyrus A. Wilson and Paul Debevec},
url = {http://www.ict.usc.edu/pubs/Cosine%20Lobe%20Based%20Relighting%20from%20Gradient%20Illumination%20Photographs.pdf},
year = {2009},
date = {2009-11-01},
booktitle = {Conference on Visual Media Production},
address = {London, UK},
abstract = {We present an image-based method for relighting a scene by analytically fitting a cosine lobe to the reflectance function at each pixel, based on gradient illumination photographs. Realistic relighting results for many materials are obtained using a single per-pixel cosine lobe obtained from just two color photographs: one under uniform white illumination and the other under colored gradient illumination. For materials with wavelength-dependent scattering, a better fit can be obtained using independent cosine lobes for the red, green, and blue channels, obtained from three monochromatic gradient illumination conditions instead of the colored gradient condition. We explore two cosine lobe reflectance functions, both of which allow an analytic fit to the gradient conditions. One is non-zero over half the sphere of lighting directions, which works well for diffuse and specular materials, but fails for materials with broader scattering such as fur. The other is non-zero everywhere, which works well for broadly scattering materials and still produces visually plausible results for diffuse and specular materials. Additionally, we estimate scene geometry from the photometric normals to produce hard shadows cast by the geometry, while still reconstructing the input photographs exactly.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
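The per-pixel model fitted in this paper is a single cosine lobe; in the notation below (mine, not the paper's), a is the lobe amplitude and \mu the lobe axis:

\[
  R(\omega) \;=\; a\,\max(\boldsymbol{\mu}\cdot\boldsymbol{\omega},\,0).
\]

Because the gradient condition effectively measures, per pixel, the centroid of the reflectance function over lighting directions, the lobe axis follows (up to a lobe-dependent scale) from the per-pixel ratio of the gradient-lit image to the uniformly lit image, \mu \propto 2\,I_g/I_u - 1, which is what makes the fit analytic.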
Alexander, Oleg; Rogers, Mike; Lambeth, William; Chiang, Matt; Debevec, Paul
Creating a Photoreal Digital Actor: The Digital Emily Project Proceedings Article
In: IEEE European Conference on Visual Media Production (CVMP), London, UK, 2009.
@inproceedings{alexander_creating_2009,
title = {Creating a Photoreal Digital Actor: The Digital Emily Project},
author = {Oleg Alexander and Mike Rogers and William Lambeth and Matt Chiang and Paul Debevec},
url = {https://dl.acm.org/doi/pdf/10.1145/1667239.1667251},
year = {2009},
date = {2009-11-01},
booktitle = {IEEE European Conference on Visual Media Production (CVMP)},
address = {London, UK},
abstract = {The Digital Emily Project is a collaboration between facial animation company Image Metrics and the Graphics Laboratory at the University of Southern California's Institute for Creative Technologies to achieve one of the world's first photorealistic digital facial performances. The project leverages latest-generation techniques in high-resolution face scanning, character rigging, video-based facial animation, and compositing. An actress was first filmed on a studio set speaking emotive lines of dialog in high definition. The lighting on the set was captured as a high dynamic range light probe image. The actress' face was then three-dimensionally scanned in thirty-three facial expressions showing different emotions and mouth and eye movements using a high-resolution facial scanning process accurate to the level of skin pores and fine wrinkles. Lighting-independent diffuse and specular reflectance maps were also acquired as part of the scanning process. Correspondences between the 3D expression scans were formed using a semi-automatic process, allowing a blendshape facial animation rig to be constructed whose expressions closely mirrored the shapes observed in the rich set of facial scans; animated eyes and teeth were also added to the model. Skin texture detail showing dynamic wrinkling was converted into multiresolution displacement maps also driven by the blend shapes. A semi-automatic video-based facial animation system was then used to animate the 3D face rig to match the performance seen in the original video, and this performance was tracked onto the facial motion in the studio video. The final face was illuminated by the captured studio illumination and shaded using the acquired reflectance maps with a skin translucency shading algorithm. Using this process, the project was able to render a synthetic facial performance which was generally accepted as being a real face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Lang, Magnus; Fyffe, Graham; Yu, Xueming; Busch, Jay; McDowall, Ian; Bolas, Mark; Debevec, Paul
Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System Journal Article
In: ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009, vol. 28, no. 3, 2009.
@article{jones_achieving_2009,
title = {Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System},
author = {Andrew Jones and Magnus Lang and Graham Fyffe and Xueming Yu and Jay Busch and Ian McDowall and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Achieving%20Eye%20Contact%20in%20a%20One-to-Many%203D%20Video%20Teleconferencing%20System.pdf},
year = {2009},
date = {2009-08-01},
journal = {ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009},
volume = {28},
number = {3},
abstract = {We present a set of algorithms and an associated display system capable of producing correctly rendered eye contact between a three-dimensionally transmitted remote participant and a group of observers in a 3D teleconferencing system. The participant's face is scanned in 3D at 30Hz and transmitted in real time to an autostereoscopic horizontal-parallax 3D display, displaying him or her over more than a 180° field of view observable to multiple observers. To render the geometry with correct perspective, we create a fast vertex shader based on a 6D lookup table for projecting 3D scene vertices to a range of subject angles, heights, and distances. We generalize the projection mathematics to arbitrarily shaped display surfaces, which allows us to employ a curved concave display surface to focus the high speed imagery to individual observers. To achieve two-way eye contact, we capture 2D video from a cross-polarized camera reflected to the position of the virtual participant's eyes, and display this 2D video feed on a large screen in front of the real participant, replicating the viewpoint of their virtual self. To achieve correct vertical perspective, we further leverage this image to track the position of each audience member's eyes, allowing the 3D display to render correct vertical perspective for each of the viewers around the device. The result is a one-to-many 3D teleconferencing system able to reproduce the effects of gaze, attention, and eye contact generally missing in traditional teleconferencing systems.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {article}
}
Wilson, Cyrus A.; Ghosh, Abhijeet; Peers, Pieter; Chiang, Jen-Yuan; Busch, Jay; Debevec, Paul
2D and 3D facial correspondences via photometric alignment Proceedings Article
In: SIGGRAPH, New Orleans, LA, 2009, ISBN: 978-1-60558-834-6.
@inproceedings{wilson_2d_2009,
title = {2D and 3D facial correspondences via photometric alignment},
author = {Cyrus A. Wilson and Abhijeet Ghosh and Pieter Peers and Jen-Yuan Chiang and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/2D%20and%203D%20facial%20correspondences%20via%20photometric%20alignment.pdf},
doi = {10.1145/1597990.1598018},
isbn = {978-1-60558-834-6},
year = {2009},
date = {2009-08-01},
booktitle = {SIGGRAPH},
address = {New Orleans, LA},
abstract = {Capturing facial geometry that is high-resolution, yet easy to animate, remains a difficult challenge. While a single scanned geometry may be straightforward to animate smoothly, it may not always yield realistic fine scale detail when deformed into different facial expressions. Combining scans of multiple facial expressions, however, is only practical if geometrical correspondences between the different scanned expressions are available. Correspondences obtained based on locations of facial landmarks or of placed markers are often sparse, especially compared to fine-scale structures such as individual skin pores. The resulting misalignment of fine detail can introduce artifacts or blur out details we wish to preserve.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Chen, Tongbo; Peers, Pieter; Wilson, Cyrus A.; Debevec, Paul
Estimating Specular Roughness and Anisotropy from Second Order Spherical Gradient Illumination Proceedings Article
In: Computer Graphics Forum, vol. 28, no. 4, 2009.
@inproceedings{ghosh_estimating_2009,
title = {Estimating Specular Roughness and Anisotropy from Second Order Spherical Gradient Illumination},
author = {Abhijeet Ghosh and Tongbo Chen and Pieter Peers and Cyrus A. Wilson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Estimating%20Specular%20Roughness%20and%20Anisotropy%20from%20Second%20Order%20Spherical%20Gradient%20Illumination.pdf},
year = {2009},
date = {2009-06-01},
booktitle = {Computer Graphics Forum},
volume = {28},
number = {4},
abstract = {This paper presents a novel method for estimating specular roughness and tangent vectors, per surface point, from polarized second order spherical gradient illumination patterns. We demonstrate that for isotropic BRDFs, only three second order spherical gradients are sufficient to robustly estimate spatially varying specular roughness. For anisotropic BRDFs, an additional two measurements yield specular roughness and tangent vectors per surface point. We verify our approach with different illumination configurations which project both discrete and continuous fields of gradient illumination. Our technique provides a direct estimate of the per-pixel specular roughness and thus does not require off-line numerical optimization that is typical for the measure-and-fit approach to classical BRDF modeling.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Tsiartas, Andreas; Ghosh, Prasanta Kumar; Georgiou, Panayiotis G.; Narayanan, Shrikanth
Robust Word Boundary Detection in Spontaneous Speech using Acoustic and Lexical Clues Proceedings Article
In: Proceedings of ICASSP, Taipei, Taiwan, 2009.
@inproceedings{tsiartas_robust_2009,
title = {Robust Word Boundary Detection in Spontaneous Speech using Acoustic and Lexical Clues},
author = {Andreas Tsiartas and Prasanta Kumar Ghosh and Panayiotis G. Georgiou and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Robust%20Word%20Boundary%20Detection%20in%20Spontaneous%20Speech%20using%20Acoustic%20and%20Lexical%20Clues.pdf},
year = {2009},
date = {2009-04-01},
booktitle = {Proceedings of ICASSP},
address = {Taipei, Taiwan},
abstract = {We consider the problem of word boundary detection in spontaneous speech utterances. Acoustic features have been well explored in the literature in the context of word boundary detection; however, in spontaneous speech of the Switchboard-I corpus, we found that the accuracy of word boundary detection using acoustic features is poor (F-score ∼ 0.63). We propose a new feature that captures lexical cues in the context of the word boundary detection problem. We show that including the proposed lexical feature along with the usual acoustic features, the accuracy of the word boundary detection improves considerably (F-score ∼ 0.81). We also demonstrate the robustness of our proposed feature in presence of different noise levels for additive white and pink noise.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Lamond, Bruce; Peers, Pieter; Ghosh, Abhijeet; Debevec, Paul
Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination, Supplemental Material Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2009, 2009.
@techreport{lamond_image-based_2009,
title = {Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination, Supplemental Material},
author = {Bruce Lamond and Pieter Peers and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2009.pdf},
year = {2009},
date = {2009-01-01},
number = {ICT TR 01 2009},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present an image-based method for separating diffuse and specular reflections using environmental structured illumination. Two types of structured illumination are discussed: phase-shifted sine wave patterns, and phase-shifted binary stripe patterns. In both cases the low-pass filtering nature of diffuse reflections is utilized to separate the reflection components. We illustrate our method on a wide range of example scenes and applications.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Peers, Pieter; Mahajan, Dhruv K.; Lamond, Bruce; Ghosh, Abhijeet; Matusik, Wojciech; Ramamoorthi, Ravi; Debevec, Paul
Compressive Light Transport Sensing Journal Article
In: ACM Transactions on Graphics, vol. 28, no. 1, 2009.
@article{peers_compressive_2009,
title = {Compressive Light Transport Sensing},
author = {Pieter Peers and Dhruv K. Mahajan and Bruce Lamond and Abhijeet Ghosh and Wojciech Matusik and Ravi Ramamoorthi and Paul Debevec},
url = {http://ict.usc.edu/pubs/Compressive%20Light%20Transport%20Sensing.pdf},
year = {2009},
date = {2009-01-01},
journal = {ACM Transactions on Graphics},
volume = {28},
number = {1},
abstract = {In this paper we propose a new framework for capturing light transport data of a real scene, based on the recently developed theory of compressive sensing. Compressive sensing offers a solid mathematical framework to infer a sparse signal from a limited number of non-adaptive measurements. Besides introducing compressive sensing for fast acquisition of light transport to computer graphics, we develop several innovations that address specific challenges for image-based relighting, and which may have broader implications. We develop a novel hierarchical decoding algorithm that improves reconstruction quality by exploiting inter-pixel coherency relations. Additionally, we design new non-adaptive illumination patterns that minimize measurement noise and further improve reconstruction quality. We illustrate our framework by capturing detailed high-resolution reflectance fields for image-based relighting.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
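The hierarchical decoding algorithm described in this abstract is specific to the paper. As a generic illustration of the compressive-sensing step it builds on (inferring a sparse signal from a limited number of non-adaptive measurements), here is a minimal orthogonal matching pursuit sketch; this is a standard textbook recovery method, not the authors' decoder, and the measurement matrix is assumed to be supplied by the caller:

    import numpy as np

    def omp(Phi, y, k):
        # Greedily recover a k-sparse x such that y is approximately Phi @ x.
        # Phi: (m, n) non-adaptive measurement matrix, m << n; y: (m,) measurements.
        residual = y.astype(float).copy()
        support = []
        coeffs = np.zeros(0)
        for _ in range(k):
            # Select the column most correlated with the current residual.
            j = int(np.argmax(np.abs(Phi.T @ residual)))
            if j not in support:
                support.append(j)
            # Re-fit all selected coefficients by least squares.
            coeffs, *_ = np.linalg.lstsq(Phi[:, support], y, rcond=None)
            residual = y - Phi[:, support] @ coeffs
        x = np.zeros(Phi.shape[1])
        x[support] = coeffs
        return x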
Vlasic, Daniel; Peers, Pieter; Baran, Ilya; Debevec, Paul; Popovic, Jovan; Rusinkiewicz, Szymon; Matusik, Wojciech
Dynamic Shape Capture using Multi-View Photometric Stereo Journal Article
In: ACM Transactions on Graphics, vol. 28, no. 5, 2009.
@article{vlasic_dynamic_2009,
title = {Dynamic Shape Capture using Multi-View Photometric Stereo},
author = {Daniel Vlasic and Pieter Peers and Ilya Baran and Paul Debevec and Jovan Popovic and Szymon Rusinkiewicz and Wojciech Matusik},
url = {http://ict.usc.edu/pubs/Dynamic%20Shape%20Capture%20using%20Multi-View%20Photometric%20Stereo.pdf},
year = {2009},
date = {2009-01-01},
journal = {ACM Transactions on Graphics},
volume = {28},
number = {5},
abstract = {We describe a system for high-resolution capture of moving 3D geometry, beginning with dynamic normal maps from multiple views. The normal maps are captured using active shape-from-shading (photometric stereo), with a large lighting dome providing a series of novel hemispherical lighting configurations. To compensate for low-frequency deformation, we perform multi-view matching and thin-plate spline deformation on the initial surfaces obtained by integrating the normal maps. Next, the corrected meshes are merged into a single mesh using a volumetric method. The final output is a set of meshes that were impossible to produce with previous methods: they exhibit details on the order of a few millimeters and represent the performance over human-size working volumes at a temporal resolution of 60 Hz.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Yamada, Hideshi; Peers, Pieter; Debevec, Paul
Compact Representation of Reflectance Fields using Clustered Sparse Residual Factorization Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2009, 2009.
@techreport{yamada_compact_2009,
title = {Compact Representation of Reflectance Fields using Clustered Sparse Residual Factorization},
author = {Hideshi Yamada and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-02-2009.pdf},
year = {2009},
date = {2009-01-01},
number = {ICT TR 02 2009},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a novel compression method for fixed viewpoint reflectance fields, captured for example by a Light Stage. Our compressed representation consists of a global approximation that exploits the similarities between the reflectance functions of different pixels, and a local approximation that encodes the per-pixel residual with the global approximation. Key to our method is a clustered sparse residual factorization. This sparse residual factorization ensures that the per-pixel residual matrix is as sparse as possible, enabling a compact local approximation. Finally, we demonstrate that the presented compact representation is well suited for high-quality real-time rendering.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Lamond, Bruce; Peers, Pieter; Ghosh, Abhijeet; Debevec, Paul
Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination Proceedings Article
In: IEEE International Conference on Computational Photography, 2009.
@inproceedings{lamond_image-based_2009-1,
title = {Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination},
author = {Bruce Lamond and Pieter Peers and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Image-based%20Separation%20of%20Diffuse%20and%20Specular%20Reflections%20using%20Environmental%20Structured%20Illumination.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {IEEE International Conference on Computational Photography},
abstract = {We present an image-based method for separating diffuse and specular reflections using environmental structured illumination. Two types of structured illumination are discussed: phase-shifted sine wave patterns, and phase-shifted binary stripe patterns. In both cases the low-pass filtering nature of diffuse reflections is utilized to separate the reflection components. We illustrate our method on a wide range of example scenes and applications.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Ghosh, Abhijeet; Hawkins, Tim; Peers, Pieter; Frederiksen, Sune; Debevec, Paul
Practical Modeling and Acquisition of Layered Facial Reflectance Journal Article
In: ACM Transactions on Graphics, vol. 27, no. 5, 2008.
@article{ghosh_practical_2008,
title = {Practical Modeling and Acquisition of Layered Facial Reflectance},
author = {Abhijeet Ghosh and Tim Hawkins and Pieter Peers and Sune Frederiksen and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Modeling%20and%20Acquisition%20of%20Layered%20Facial%20Reflectance.pdf},
year = {2008},
date = {2008-12-01},
journal = {ACM Transactions on Graphics},
volume = {27},
number = {5},
abstract = {We present a practical method for modeling layered facial reflectance consisting of specular reflectance, single scattering, and shallow and deep subsurface scattering. We estimate parameters of appropriate reflectance models for each of these layers from just 20 photographs recorded in a few seconds from a single viewpoint. We extract spatially-varying specular reflectance and single-scattering parameters from polarization-difference images under spherical and point source illumination. Next, we employ direct-indirect separation to decompose the remaining multiple scattering observed under cross-polarization into shallow and deep scattering components to model the light transport through multiple layers of skin. Finally, we match appropriate diffusion models to the extracted shallow and deep scattering components for different regions on the face. We validate our technique by comparing renderings of subjects to reference photographs recorded from novel viewpoints and under novel illumination conditions.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Ma, Wan-Chun; Jones, Andrew; Hawkins, Tim; Chiang, Jen-Yuan; Debevec, Paul
A high-resolution geometry capture system for facial performance Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2008.
@inproceedings{ma_high-resolution_2008,
title = {A high-resolution geometry capture system for facial performance},
author = {Wan-Chun Ma and Andrew Jones and Tim Hawkins and Jen-Yuan Chiang and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20high-resolution%20geometry%20capture%20system%20for%20facial%20performance.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {SIGGRAPH},
address = {Los Angeles, CA},
abstract = {Results: The two cameras capture data at a resolution of 2400×1800 (Bayer pattern). With an internal RAM storage of 12 GB, the maximum recording time is around 5 seconds. The result of each scan contains a high-resolution mesh that usually consists of 1M triangles, a smoothed medium-resolution mesh, a color texture, a world-space normal map, and a displacement map representing the difference between the high-resolution mesh and the smoothed mesh.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Peers, Pieter; Mahajan, Dhruv K.; Lamond, Bruce; Ghosh, Abhijeet; Matusik, Wojciech; Ramamoorthi, Ravi; Debevec, Paul
Compressive Light Transport Sensing Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 05 2008, 2008.
@techreport{peers_compressive_2008,
title = {Compressive Light Transport Sensing},
author = {Pieter Peers and Dhruv K. Mahajan and Bruce Lamond and Abhijeet Ghosh and Wojciech Matusik and Ravi Ramamoorthi and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT%20TR%2005%202008.pdf},
year = {2008},
date = {2008-01-01},
number = {ICT TR 05 2008},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {In this paper we propose a new framework for capturing light transport data of a real scene, based on the recently developed theory of compressive sensing. Compressive sensing offers a solid mathematical framework to infer a sparse signal from a limited number of non-adaptive measurements. Besides introducing compressive sensing for fast acquisition of light transport to computer graphics, we develop several innovations that address specific challenges for image-based relighting, and which may have broader implications. We develop a novel hierarchical decoding algorithm that improves reconstruction quality by exploiting inter-pixel coherency relations. Additionally, we design new non-adaptive illumination patterns that minimize measurement noise and further improve reconstruction quality. We illustrate our framework by capturing detailed high-resolution reflectance fields for image-based relighting.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Ma, Wan-Chun; Jones, Andrew; Chiang, Jen-Yuan; Hawkins, Tim; Frederiksen, Sune; Peers, Pieter; Vukovic, Marko; Ouhyoung, Ming; Debevec, Paul
Facial Performance Synthesis using Deformation-Driven Polynomial Displacement Maps Journal Article
In: ACM Transactions on Graphics, vol. 27, no. 5, 2008.
@article{ma_facial_2008,
title = {Facial Performance Synthesis using Deformation-Driven Polynomial Displacement Maps},
author = {Wan-Chun Ma and Andrew Jones and Jen-Yuan Chiang and Tim Hawkins and Sune Frederiksen and Pieter Peers and Marko Vukovic and Ming Ouhyoung and Paul Debevec},
url = {http://ict.usc.edu/pubs/Facial%20Performance%20Synthesis%20using%20Deformation-Driven%20Polynomial%20Displacement%20Maps.pdf},
year = {2008},
date = {2008-01-01},
journal = {ACM Transactions on Graphics},
volume = {27},
number = {5},
abstract = {We present a novel method for acquisition, modeling, compression, and synthesis of realistic facial deformations using polynomial displacement maps. Our method consists of an analysis phase where the relationship between motion capture markers and detailed facial geometry is inferred, and a synthesis phase where novel detailed animated facial geometry is driven solely by a sparse set of motion capture markers. For analysis, we record the actor wearing facial markers while performing a set of training expression clips. We capture real-time high-resolution facial deformations, including dynamic wrinkle and pore detail, using interleaved structured light 3D scanning and photometric stereo. Next, we compute displacements between a neutral mesh driven by the motion capture markers and the high-resolution captured expressions. These geometric displacements are stored in a polynomial displacement map which is parameterized according to the local deformations of the motion capture dots. For synthesis, we drive the polynomial displacement map with new motion capture data. This allows the recreation of large-scale muscle deformation, medium and fine wrinkles, and dynamic skin pore detail. Applications include the compression of existing performance data and the synthesis of new performances. Our technique is independent of the underlying geometry capture system and can be used to automatically generate high-frequency wrinkle and pore details on top of many existing facial animation systems.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Jones, Andrew; Chiang, Jen-Yuan; Ghosh, Abhijeet; Lang, Magnus; Hullin, Matthias; Busch, Jay; Debevec, Paul
Real-time Geometry and Reflectance Capture for Digital Face Replacement Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2008, 2008.
@techreport{jones_real-time_2008,
title = {Real-time Geometry and Reflectance Capture for Digital Face Replacement},
author = {Andrew Jones and Jen-Yuan Chiang and Abhijeet Ghosh and Magnus Lang and Matthias Hullin and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-04-2008.pdf},
year = {2008},
date = {2008-01-01},
number = {ICT TR 04 2008},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
2007
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
An Interactive 360° Light Field Display Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{jones_interactive_2007,
title = {An Interactive 360° Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
abstract = {While a great deal of computer generated imagery is modeled and rendered in 3D, the vast majority of this 3D imagery is shown on 2D displays. Various forms of 3D displays have been contemplated and constructed for at least one hundred years [Lippmann 1908], but only recent evolutions in digital capture, computation, and display have made functional and practical 3D displays possible.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Chabert, Charles-Felix; Bolas, Mark; Peers, Pieter; Debevec, Paul
A system for high-resolution face scanning based on polarized spherical illumination Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{ma_system_2007,
title = {A system for high-resolution face scanning based on polarized spherical illumination},
author = {Wan-Chun Ma and Tim Hawkins and Charles-Felix Chabert and Mark Bolas and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20system%20for%20high-resolution%20face%20scanning%20based%20on%20polarized%20spherical%20illumination.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
Rendering for an Interactive 360 Degree Light Field Display Proceedings Article
In: ACM SIGGRAPH conference proceedings, San Diego, CA, 2007.
@inproceedings{jones_rendering_2007,
title = {Rendering for an Interactive 360 Degree Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Rendering%20for%20an%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {ACM SIGGRAPH conference proceedings},
address = {San Diego, CA},
abstract = {We describe a set of rendering techniques for an autostereoscopic light field display able to present interactive 3D graphics to multiple simultaneous viewers 360 degrees around the display. The display consists of a high-speed video projector, a spinning mirror covered by a holographic diffuser, and FPGA circuitry to decode specially rendered DVI video signals. The display uses a standard programmable graphics card to render over 5,000 images per second of interactive 3D graphics, projecting 360-degree views with 1.25 degree separation up to 20 updates per second. We describe the system's projection geometry and its calibration process, and we present a multiple-center-of-projection rendering technique for creating perspective-correct images from arbitrary viewpoints around the display. Our projection technique allows correct vertical perspective and parallax to be rendered for any height and distance when these parameters are known, and we demonstrate this effect with interactive raster graphics using a tracking system to measure the viewer's height and distance. We further apply our projection technique to the display of photographed light fields with accurate horizontal and vertical parallax. We conclude with a discussion of the display's visual accommodation performance and discuss techniques for displaying color imagery.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Peers, Pieter; Chabert, Charles-Felix; Weiss, Malte; Debevec, Paul
Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination Proceedings Article
In: Kautz, Jan; Pattanaik, Sumanta (Ed.): Eurographics Symposium on Rendering, 2007.
@inproceedings{ma_rapid_2007,
title = {Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination},
author = {Wan-Chun Ma and Tim Hawkins and Pieter Peers and Charles-Felix Chabert and Malte Weiss and Paul Debevec},
editor = {Jan Kautz and Sumanta Pattanaik},
url = {http://ict.usc.edu/pubs/Rapid%20Acquisition%20of%20Specular%20and%20Diffuse%20Normal%20Maps%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Eurographics Symposium on Rendering},
abstract = {We estimate surface normal maps of an object from either its diffuse or specular reflectance using four spherical gradient illumination patterns. In contrast to traditional photometric stereo, the spherical patterns allow normals to be estimated simultaneously from any number of viewpoints. We present two polarized lighting techniques that allow the diffuse and specular normal maps of an object to be measured independently. For scattering materials, we show that the specular normal maps yield the best record of detailed surface shape while the diffuse normals deviate from the true surface normal due to subsurface scattering, and that this effect is dependent on wavelength. We show several applications of this acquisition technique. First, we capture normal maps of a facial performance simultaneously from several viewing positions using time-multiplexed illumination. Second, we show that high-resolution normal maps based on the specular component can be used with structured light 3D scanning to quickly acquire high-resolution facial surface geometry using off-the-shelf digital still cameras. Finally, we present a real-time shading model that uses independently estimated normal maps for the specular and diffuse color channels to reproduce some of the perceptually important effects of subsurface scattering.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
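As a minimal sketch of the diffuse-normal recovery this abstract describes: for spherical gradient patterns P_k(w) = (1 + w_k)/2, the ratio of each gradient-lit image to the full-on image gives the corresponding normal component up to an affine map, with the remaining scale constants absorbed by the final normalization. The function below assumes aligned grayscale images and is an illustration only, not the paper's full pipeline (which also recovers specular normals via polarization):

    import numpy as np

    def normals_from_gradients(ix, iy, iz, ifull, eps=1e-6):
        # ix, iy, iz: (H, W) images under the x, y, z gradient patterns;
        # ifull: (H, W) image under constant full-sphere illumination.
        g = np.stack([ix, iy, iz], axis=-1) / (ifull[..., None] + eps)
        n = 2.0 * g - 1.0  # map the [0, 1] ratio to a signed component
        n /= np.linalg.norm(n, axis=-1, keepdims=True) + eps
        return n  # (H, W, 3) unit normals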
Lamond, Bruce; Peers, Pieter; Debevec, Paul
Fast Image-based Separation of Diffuse and Specular Reflections Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2007, 2007.
@techreport{lamond_fast_2007,
title = {Fast Image-based Separation of Diffuse and Specular Reflections},
author = {Bruce Lamond and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-02-2007.pdf},
year = {2007},
date = {2007-01-01},
number = {ICT TR 02 2007},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a novel image-based method for separating diffuse and specular reflections of real objects under distant environmental illumination. By illuminating a scene with only four high frequency illumination patterns, the specular and diffuse reflections can be separated by computing the maximum and minimum observed pixel values. Furthermore, we show that our method can be extended to separate diffuse and specular components under image-based environmental illumination. Applications range from image-based modeling of reflectance properties to improved normal and geometry acquisition.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
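The separation rule stated in this abstract (per-pixel maximum and minimum over the pattern sequence) admits a very short implementation. A minimal sketch, assuming high-frequency patterns with mean 1/2 and full contrast, so that diffuse reflection sees the pattern average while specular reflection follows the pattern extremes:

    import numpy as np

    def separate(images):
        # images: (N, H, W) frames under N shifted high-frequency patterns.
        # Diffuse low-pass filters the pattern to its mean (1/2), so per pixel:
        #   i_min ~= 0.5 * diffuse            (specular sees pattern minimum ~ 0)
        #   i_max ~= 0.5 * diffuse + specular
        i_max = images.max(axis=0)
        i_min = images.min(axis=0)
        return 2.0 * i_min, i_max - i_min  # (diffuse, specular)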
2006
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{debevec_concave_2006,
title = {Concave Surround Optics for Rapid Multi-View Imaging},
author = {Paul Debevec and Mark Bolas and Ian McDowall},
url = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors to simulate the appearance of camera movement around a scene while the physical camera remains stationary. The system is thus amenable to capturing dynamic events, avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high-speed video of a dynamic scene. We show smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare it with alternate setups.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Proceedings Article
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
@inproceedings{jones_simulating_2006,
title = {Simulating Spatially Varying Lighting on a Live Performance},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
address = {London, UK},
abstract = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
Virtual Cinematography: Relighting through Computation Journal Article
In: IEEE Computer Magazine, vol. 39, pp. 57–65, 2006.
@article{debevec_virtual_2006,
title = {Virtual Cinematography: Relighting through Computation},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Cinematography-%20Relighting%20through%20Computation.pdf},
year = {2006},
date = {2006-08-01},
journal = {IEEE Computer Magazine},
volume = {39},
pages = {57–65},
abstract = {Recording how scenes transform incident illumination into radiant light is an active topic in computational photography. Such techniques make it possible to create virtual images of a person or place from new viewpoints and in any form of illumination.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Callieri, Marco; Debevec, Paul; Scopigno, Roberto
A realtime immersive application with realistic lighting: The Parthenon Journal Article
In: Computers & Graphics, vol. 30, no. 3, pp. 368–376, 2006.
@article{callieri_realtime_2006,
title = {A realtime immersive application with realistic lighting: The Parthenon},
author = {Marco Callieri and Paul Debevec and Roberto Scopigno},
url = {http://ict.usc.edu/pubs/A%20realtime%20immersive%20application%20with%20realistic%20lighting-%20The%20Parthenon.pdf},
year = {2006},
date = {2006-06-01},
journal = {Computers & Graphics},
volume = {30},
number = {3},
pages = {368–376},
abstract = {Offline rendering techniques have now reached an astonishing level of realism, but at the cost of long computation times. The new generation of programmable graphics hardware, on the other hand, makes it possible to implement in real time some of the visual effects previously available only for cinematographic production. We describe the design and implementation of an interactive system which is able to reproduce in real time one of the crucial sequences from the short movie “The Parthenon” presented at SIGGRAPH 2004. The application is designed to run on a specific immersive reality system, making it possible for a user to perceive the virtual environment with nearly cinematographic visual quality.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering (2006), 2006.
@inproceedings{einarsson_relighting_2006,
title = {Relighting Human Locomotion with Flowed Reflectance Fields},
author = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
url = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Eurographics Symposium on Rendering (2006)},
abstract = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters Proceedings Article
In: 11th International Fall Workshop on Vision, Modeling and Visualization, Aachen, Germany, 2006.
@inproceedings{tariq_efficient_2006-1,
title = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters},
author = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
url = {http://ict.usc.edu/pubs/Efficient%20Estimation%20of%20Spatially%20Varying%20Subsurface%20Scattering%20Parameters.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {11th International Fall Workshop on Vision, Modeling and Visualization},
address = {Aachen, Germany},
abstract = {We present an image-based technique to efficiently acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase-shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set of structured light patterns is used to obtain face geometry. We subtract the minimum of each profile to remove the contribution of interreflected light from the rest of the face, and then match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the subsurface reflectance of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2006, 2006.
@techreport{tariq_efficient_2006,
title = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting},
author = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2006.pdf},
year = {2006},
date = {2006-01-01},
number = {ICT TR 01 2006},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present an image-based technique to rapidly acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase-shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set contains structured light and is used to obtain face geometry. We match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Peers, Pieter; Hawkins, Tim; Debevec, Paul
A Reflective Light Stage Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2006, 2006.
@techreport{peers_reflective_2006,
title = {A Reflective Light Stage},
author = {Pieter Peers and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-04.2006.pdf},
year = {2006},
date = {2006-01-01},
number = {ICT TR 04 2006},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a novel acquisition device to capture high resolution 4D reflectance fields of real scenes. The device consists of a concave hemispherical surface coated with a rough specular paint and a digital video projector with a fish-eye lens positioned near the center of the hemisphere. The scene is placed near the projector, also near the center, and photographed from a fixed vantage point. The projector projects a high-resolution image of incident illumination which is reflected by the rough hemispherical surface to become the illumination on the scene. We demonstrate the utility of this device by capturing a high resolution hemispherical reflectance field of a specular object which would be difficult to capture using previous acquisition techniques.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
2005
Debevec, Paul
Capturing and Simulating Physically Accurate Illumination in Computer Graphics Proceedings Article
In: 11th Annual Symposium on Frontiers of Engineering, Niskayuna, NY, 2005.
@inproceedings{debevec_capturing_2005,
title = {Capturing and Simulating Physically Accurate Illumination in Computer Graphics},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Capturing%20and%20Simulating%20Physically%20Accurate%20Illumination%20in%20Computer%20Graphics.pdf},
year = {2005},
date = {2005-09-01},
booktitle = {11th Annual Symposium on Frontiers of Engineering},
address = {Niskayuna, NY},
abstract = {Anyone who has seen a recent summer blockbuster has witnessed the dramatic increases in computer-generated realism in recent years. Visual effects supervisors now report that bringing even the most challenging visions of film directors to the screen is no longer a question of what's possible; with today's techniques it is only a matter of time and cost. Driving this increase in realism have been computer graphics (CG) techniques for simulating how light travels within a scene and for simulating how light reflects off of and through surfaces. These techniques, some developed recently and some originating in the 1980s, are being applied to the visual effects process by computer graphics artists who have found ways to channel the power of these new tools.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
A Median Cut Algorithm for Light Probe Sampling Proceedings Article
In: SIGGRAPH (Special Interest Group - Graphics), Los Angeles, CA, 2005.
@inproceedings{debevec_median_2005,
title = {A Median Cut Algorithm for Light Probe Sampling},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Median%20Cut%20Algorithm%20for%20Light%20Probe%20Sampling.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {SIGGRAPH (Special Interest Group - Graphics)},
address = {Los Angeles, CA},
abstract = {We present a technique for approximating a light probe image as a constellation of light sources based on a median cut algorithm. The algorithm is efficient, simple to implement, and can realistically represent a complex lighting environment with as few as 64 point light sources.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
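A compact sketch of the median cut partitioning described above, assuming the light probe is supplied as a luminance image in a float array. Solid-angle weighting for latitude-longitude maps (scaling each row by the cosine of its latitude) is omitted for brevity; six iterations yield the 64 lights mentioned in the abstract:

    import numpy as np

    def median_cut(lum, n_iters=6):
        # Partition the image into 2**n_iters regions of roughly equal energy
        # and return one (y, x) light position per region at its energy centroid.
        regions = [(0, lum.shape[0], 0, lum.shape[1])]  # (y0, y1, x0, x1)
        for _ in range(n_iters):
            new = []
            for y0, y1, x0, x1 in regions:
                r = lum[y0:y1, x0:x1]
                split_rows = (y1 - y0) >= (x1 - x0)       # split the longer axis
                c = r.sum(axis=1 if split_rows else 0).cumsum()
                cut = int(np.searchsorted(c, c[-1] / 2.0)) + 1
                cut = max(1, min(cut, len(c) - 1))        # keep both halves non-empty
                if split_rows:
                    new += [(y0, y0 + cut, x0, x1), (y0 + cut, y1, x0, x1)]
                else:
                    new += [(y0, y1, x0, x0 + cut), (y0, y1, x0 + cut, x1)]
            regions = new
        lights = []
        for y0, y1, x0, x1 in regions:
            r = lum[y0:y1, x0:x1]
            ys, xs = np.indices(r.shape)
            w = r.sum() + 1e-12
            lights.append((y0 + (ys * r).sum() / w, x0 + (xs * r).sum() / w))
        return lights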
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Proceedings Article
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
@inproceedings{jones_performance_2005,
title = {Performance Geometry Capture for Spatially Varying Relighting},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {SIGGRAPH 2005 Sketch},
address = {Los Angeles, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
Acquisition of Time-Varying Participating Media Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2005.
@inproceedings{hawkins_acquisition_2005,
title = {Acquisition of Time-Varying Participating Media},
author = {Tim Hawkins and Per Einarsson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Acquisition%20of%20Time-Varying%20Participating%20Media.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {SIGGRAPH},
address = {Los Angeles, CA},
abstract = {We present a technique for capturing time-varying volumetric data of participating media. A laser sheet is swept repeatedly through the volume, and the scattered light is imaged using a high-speed camera. Each sweep of the laser provides a near-simultaneous volume of density values. We demonstrate rendered animations under changing viewpoint and illumination, making use of measured values for the scattering phase function and albedo.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
A Dual Light Stage Proceedings Article
In: Dutré, Philip; Bala, Kavita (Ed.): Eurographics Symposium on Rendering, Konstanz, Germany, 2005.
@inproceedings{hawkins_dual_2005,
title = {A Dual Light Stage},
author = {Tim Hawkins and Per Einarsson and Paul Debevec},
editor = {Philip Dutré and Kavita Bala},
url = {http://ict.usc.edu/pubs/A%20Dual%20Light%20Stage.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Eurographics Symposium on Rendering},
address = {Konstanz, Germany},
abstract = {We present a technique for capturing high-resolution 4D reflectance fields using the reciprocity property of light transport. In our technique we place the object inside a diffuse spherical shell and scan a laser across its surface. For each incident ray, the object scatters a pattern of light onto the inner surface of the sphere, and we photograph the resulting radiance from the sphere's interior using a camera with a fisheye lens. Because of reciprocity, the image of the inside of the sphere corresponds to the reflectance function of the surface point illuminated by the laser, that is, the color that point would appear to a camera along the laser ray when the object is lit from each direction on the surface of the sphere. The measured reflectance functions allow the object to be photorealistically rendered from the laser's viewpoint under arbitrary directional illumination conditions. Since each captured reflectance function is a high-resolution image, our data reproduces sharp specular reflections and self-shadowing more accurately than previous approaches. We demonstrate our technique by scanning objects with a wide range of reflectance properties and show accurate renderings of the objects under novel illumination conditions.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
2004
Poullis, Charalambos; Gardner, Andrew; Debevec, Paul
Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
@inproceedings{poullis_photogrammetric_2004,
title = {Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation},
author = {Charalambos Poullis and Andrew Gardner and Paul Debevec},
url = {http://ict.usc.edu/pubs/PHOTOGRAMMETRIC%20MODELING%20AND%20IMAGE-BASED%20RENDERING%20FOR%20RAPID%20VIRTUAL%20ENVIRONMENT%20CREATION.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {For realistic simulations, architecture is one of the most important elements to model and render photorealistically. Current techniques of converting architectural plans or survey data to CAD models are labor intensive, and methods for rendering such models are generally not photorealistic. In this work, we present a new approach for modeling and rendering existing architectural scenes from a sparse set of still photographs. For modeling, we use photogrammetric modeling techniques to recover the geometric representation of the architecture. The photogrammetric modeling approach presented in this paper is effective, robust and powerful because it fully exploits structural symmetries and constraints which are characteristic of architectural scenes. For rendering, we use view-dependent texture mapping, a method for compositing multiple images of a scene to create renderings from novel views. Lastly, we present a software package, named Façade, which uses the techniques described to recover the geometry and appearance of architectural scenes directly from a sparse set of photographs.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Stumpfel, Jessi; Jones, Andrew; Wenger, Andreas; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Direct HDR Capture of the Sun and Sky Proceedings Article
In: Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa, Stellenbosch, South Africa, 2004.
@inproceedings{stumpfel_direct_2004,
title = {Direct HDR Capture of the Sun and Sky},
author = {Jessi Stumpfel and Andrew Jones and Andreas Wenger and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Direct%20HDR%20Capture%20of%20the%20Sun%20and%20Sky.pdf},
year = {2004},
date = {2004-11-01},
booktitle = {Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa},
address = {Stellenbosch, South Africa},
abstract = {We present a technique for capturing the extreme dynamic range of natural illumination environments that include the sun and sky, which has presented a challenge for traditional high dynamic range photography processes. We find that through careful selection of exposure times, aperture, and neutral density filters, this full range can be covered in seven exposures with a standard digital camera. We discuss the particular calibration issues, such as lens vignetting, infrared sensitivity, and spectral transmission of neutral density filters, which must be addressed. We present an adaptive exposure range adjustment technique for minimizing the number of exposures necessary. We demonstrate our results by showing time-lapse renderings of a complex scene illuminated by high-resolution, high dynamic range natural illumination environments.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
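As a minimal sketch of assembling such an exposure series into a single radiance map: standard HDR merging takes, per pixel, a weighted average of the radiance estimates from each exposure. This assumes radiometrically linearized pixel values and folds any neutral density attenuation into the effective exposure time; it is the textbook merge, not the paper's full calibration pipeline:

    import numpy as np

    def merge_hdr(frames, times):
        # frames: list of (H, W) linearized exposures scaled to [0, 1];
        # times: matching effective exposure times (shutter * ND transmission).
        num = np.zeros_like(frames[0], dtype=float)
        den = np.zeros_like(num)
        for z, t in zip(frames, times):
            w = 1.0 - np.abs(2.0 * z - 1.0)   # hat weight: trust mid-range pixels
            num += w * z / t                  # per-frame radiance estimate
            den += w
        return num / np.maximum(den, 1e-12)   # weighted-average radiance map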
Debevec, Paul; Gardner, Andrew; Tchou, Chris; Hawkins, Tim
Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 05.2004, 2004.
@techreport{debevec_postproduction_2004,
title = {Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting},
author = {Paul Debevec and Andrew Gardner and Chris Tchou and Tim Hawkins},
url = {http://ict.usc.edu/pubs/Postproduction%20Re-Illumination%20of%20Live%20Action%20Using%20Time-Multiplexed%20Lighting.pdf},
year = {2004},
date = {2004-06-01},
number = {ICT TR 05.2004},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {In this work, we present a technique for capturing a time-varying human performance in such a way that it can be re-illuminated in postproduction. The key idea is to illuminate the subject with a variety of rapidly changing time-multiplexed basis lighting conditions, and to record these lighting conditions with a fast enough video camera so that several or many different basis lighting conditions are recorded during the span of the final video's desired frame rate. In this poster we present two versions of such a system and propose plans for creating a complete, production-ready device.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
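Given the recorded basis frames, the re-illumination step itself is linear: because light is additive, an image of the subject under a target lighting environment is a per-pixel weighted sum of the images captured under the basis lighting conditions. A minimal sketch (the basis frames and target weights are assumed inputs):

    import numpy as np

    def relight(basis_frames, weights):
        # basis_frames: (N, H, W, 3) images under N basis lighting conditions
        # captured within one output frame interval; weights: (N,) intensity of
        # each basis light in the target environment.
        return np.tensordot(weights, basis_frames, axes=1)  # (H, W, 3)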
Hawkins, Tim; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Goransson, Fredrik; Debevec, Paul
Animatable Facial Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering, Norkoping, Sweden, 2004.
@inproceedings{hawkins_animatable_2004,
title = {Animatable Facial Reflectance Fields},
author = {Tim Hawkins and Andreas Wenger and Chris Tchou and Andrew Gardner and Fredrik Goransson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Animatable%20Facial%20Re%EF%AC%82ectance%20Fields.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Eurographics Symposium on Rendering},
address = {Norkoping, Sweden},
abstract = {We present a technique for creating an animatable image-based appearance model of a human face, able to capture appearance variation over changing facial expression, head pose, view direction, and lighting condition. Our capture process makes use of a specialized lighting apparatus designed to rapidly illuminate the subject sequentially from many different directions in just a few seconds. For each pose, the subject remains still while six video cameras capture their appearance under each of the directions of lighting. We repeat this process for approximately 60 different poses, capturing different expressions, visemes, head poses, and eye positions. The images for each of the poses and camera views are registered to each other semi-automatically with the help of fiducial markers. The result is a model which can be rendered realistically under any linear blend of the captured poses and under any desired lighting condition by warping, scaling, and blending data from the original images. Finally, we show how to drive the model with performance capture data, where the pose is not necessarily a linear combination of the original captured poses.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Tchou, Chris; Gardner, Andrew; Hawkins, Tim; Poullis, Charis; Stumpfel, Jessi; Jones, Andrew; Yun, Nathaniel; Einarsson, Per; Lundgren, Therese; Fajardo, Marcos; Martinez, Philippe
Estimating Surface Reflectance Properties of a Complex Scene under Captured Natural Illumination Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 06 2004, 2004.
@techreport{debevec_estimating_2004,
title = {Estimating Surface Reflectance Properties of a Complex Scene under Captured Natural Illumination},
author = {Paul Debevec and Chris Tchou and Andrew Gardner and Tim Hawkins and Charis Poullis and Jessi Stumpfel and Andrew Jones and Nathaniel Yun and Per Einarsson and Therese Lundgren and Marcos Fajardo and Philippe Martinez},
url = {http://ict.usc.edu/pubs/ICT-TR-06.2004.pdf},
year = {2004},
date = {2004-01-01},
number = {ICT TR 06 2004},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a process for estimating spatially-varying surface reflectance of a complex scene observed under natural illumination conditions. The process uses a laser-scanned model of the scene's geometry, a set of digital images viewing the scene's surfaces under a variety of natural illumination conditions, and a set of corresponding measurements of the scene's incident illumination in each photograph. The process then employs an iterative inverse global illumination technique to compute surface colors for the scene which, when rendered under the recorded illumination conditions, best reproduce the scene's appearance in the photographs. In our process we measure BRDFs of representative surfaces in the scene to better model the non-Lambertian surface reflectance. Our process uses a novel lighting measurement apparatus to record the full dynamic range of both sunlit and cloudy natural illumination conditions. We employ Monte-Carlo global illumination, multiresolution geometry, and a texture atlas system to perform inverse global illumination on the scene. The result is a lighting-independent model of the scene that can be re-illuminated under any form of lighting. We demonstrate the process on a real-world archaeological site, showing that the technique can produce novel illumination renderings consistent with real photographs as well as reflectance properties that are consistent with ground-truth reflectance measurements.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
2003
Debevec, Paul
Image-Based Techniques for Digitizing Environments and Artifacts Proceedings Article
In: 4th International Conference on 3-D Digital Imaging and Modeling (3DIM), 2003.
@inproceedings{debevec_image-based_2003,
title = {Image-Based Techniques for Digitizing Environments and Artifacts},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Image-Based%20Techniques%20for%20Digitizing%20Environments%20and%20Artifacts.pdf},
year = {2003},
date = {2003-10-01},
booktitle = {4th International Conference on 3-D Digital Imaging and Modeling (3DIM)},
abstract = {This paper presents an overview of techniques for generating photoreal computer graphics models of real-world places and objects. Our group's early efforts in modeling scenes involved the development of Facade, an interactive photogrammetric modeling system that uses geometric primitives to model the scene, and projective texture mapping to produce the scene appearance properties. Subsequent work has produced techniques to model the incident illumination within scenes, which we have shown to be useful for realistically adding computer-generated objects to image-based models. More recently, our work has focused on recovering lighting-independent models of scenes and objects, capturing how each point on an object reflects light. Our latest work combines three-dimensional range scans, digital photographs, and incident illumination measurements to produce lighting-independent models of complex objects and environments.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gardner, Andrew; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Linear Light Source Reflectometry Proceedings Article
In: ACM Transactions on Graphics, 2003.
@inproceedings{gardner_linear_2003,
title = {Linear Light Source Reflectometry},
author = {Andrew Gardner and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Linear%20Light%20Source%20Reflectometry.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {ACM Transactions on Graphics},
abstract = {This paper presents a technique for estimating the spatially-varying reflectance properties of a surface based on its appearance during a single pass of a linear light source. By using a linear light rather than a point light source as the illuminant, we are able to reliably observe and estimate the diffuse color, specular color, and specular roughness of each point of the surface. The reflectometry apparatus we use is simple and inexpensive to build, requiring a single direction of motion for the light source and a fixed camera viewpoint. Our model fitting technique first renders a reflectance table of how diffuse and specular reflectance lobes would appear under moving linear light source illumination. Then, for each pixel we compare its series of intensity values to the tabulated reflectance lobes to determine which reflectance model parameters most closely produce the observed reflectance values. Using two passes of the linear light source at different angles, we can also estimate per-pixel surface normals as well as the reflectance parameters. Additionally our system records a per-pixel height map for the object and estimates its per-pixel translucency. We produce real-time renderings of the captured objects using a custom hardware shading algorithm. We apply the technique to a test object exhibiting a variety of materials as well as to an illuminated manuscript with gold lettering. To demonstrate the technique's accuracy, we compare renderings of the captured models to real photographs of the original objects.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
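The model-fitting step this abstract describes (comparing each pixel's intensity series against a prerendered reflectance table) reduces to a nearest-neighbor search. A minimal sketch, with the table of simulated profiles and their parameter sets assumed to be precomputed elsewhere, and chunked so the distance matrix stays in memory:

    import numpy as np

    def fit_by_table(profiles, table, params, chunk=4096):
        # profiles: (P, T) observed per-pixel intensity series under the pass
        # of the linear light; table: (M, T) simulated series, one per candidate
        # reflectance parameter set; params: (M, ...) those parameter sets.
        best = np.empty(profiles.shape[0], dtype=int)
        for s in range(0, profiles.shape[0], chunk):
            block = profiles[s:s + chunk]
            d = ((block[:, None, :] - table[None, :, :]) ** 2).sum(axis=-1)
            best[s:s + chunk] = d.argmin(axis=1)  # closest simulated lobe
        return params[best]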
Unger, J.; Wenger, Andreas; Hawkins, Tim; Gardner, Andrew; Debevec, Paul
Capturing and Rendering With Incident Light Fields Proceedings Article
In: Proceedings of the 14th Eurographics workshop on Rendering, 2003.
@inproceedings{unger_capturing_2003,
title = {Capturing and Rendering With Incident Light Fields},
author = {J. Unger and Andreas Wenger and Tim Hawkins and Andrew Gardner and Paul Debevec},
url = {http://ict.usc.edu/pubs/Capturing%20and%20Rendering%20With%20Incident%20Light%20Fields.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 14th Eurographics workshop on Rendering},
abstract = {This paper presents a process for capturing spatially and directionally varying illumination from a real-world scene and using this lighting to illuminate computer-generated objects. We use two devices for capturing such illumination. In the first we photograph an array of mirrored spheres in high dynamic range to capture the spatially varying illumination. In the second, we obtain higher resolution data by capturing images with a high dynamic range omnidirectional camera as it traverses across a plane. For both methods we apply the light field technique to extrapolate the incident illumination to a volume. We render computer-generated objects as illuminated by this captured illumination using a custom shader within an existing global illumination rendering system. To demonstrate our technique we capture several spatially-varying lighting environments with spotlights, shadows, and dappled lighting and use them to illuminate synthetic scenes. We also show comparisons to real objects under the same illumination.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
2002
Debevec, Paul; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Waese, Jamie; Hawkins, Tim
A Lighting Reproduction Approach to Live-Action Compositing Proceedings Article
In: SIGGRAPH 2002, pp. 547–556, San Antonio, TX, 2002.
@inproceedings{debevec_lighting_2002,
title = {A Lighting Reproduction Approach to Live-Action Compositing},
author = {Paul Debevec and Andreas Wenger and Chris Tchou and Andrew Gardner and Jamie Waese and Tim Hawkins},
url = {http://ict.usc.edu/pubs/A%20Lighting%20Reproduction%20Approach%20to%20Live-Action%20Compositing.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {SIGGRAPH 2002},
pages = {547–556},
address = {San Antonio, TX},
abstract = {We describe a process for compositing a live performance of an actor into a virtual set wherein the actor is consistently illuminated by the virtual environment. The Light Stage used in this work is a two-meter sphere of inward-pointing RGB light emitting diodes focused on the actor, where each light can be set to an arbitrary color and intensity to replicate a real-world or virtual lighting environment. We implement a digital two-camera infrared matting system to composite the actor into the background plate of the environment without affecting the visible-spectrum illumination on the actor. The color response of the system is calibrated to produce correct color renditions of the actor as illuminated by the environment. We demonstrate moving-camera composites of actors into real-world environments and virtual sets such that the actor is properly illuminated by the environment into which they are composited.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
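Driving the light stage from a lighting environment means assigning each inward-pointing LED the color and intensity the environment exhibits in that light's direction, pushed through a color calibration so the reproduced illumination photographs correctly. A minimal sketch of that mapping; led_intensities, env_radiance, and the single 3x3 calibration matrix are illustrative simplifications, not the paper's actual calibration procedure.

import numpy as np

def led_intensities(env_radiance, led_dirs, color_calib):
    # env_radiance: callable direction -> linear RGB radiance of the
    #               real-world or virtual environment to reproduce.
    # led_dirs:     (N, 3) unit direction of each light on the sphere.
    # color_calib:  (3, 3) matrix mapping environment RGB to LED drive
    #               values, obtained from a one-time color calibration.
    drives = np.empty((len(led_dirs), 3))
    for i, d in enumerate(led_dirs):
        drives[i] = color_calib @ np.asarray(env_radiance(d))
    return np.clip(drives, 0.0, 1.0)  # clamp to the LEDs' drive range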
Debevec, Paul
A Tutorial on Image-Based Lighting Journal Article
In: IEEE Computer Graphics and Applications, 2002.
@article{debevec_tutorial_2002,
title = {A Tutorial on Image-Based Lighting},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Image-Based%20Lighting.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Computer Graphics and Applications},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
2001
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
@inproceedings{cohen_real-time_2001,
title = {Real-Time High-Dynamic Range Texture Mapping},
author = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Eurographics Rendering Workshop},
abstract = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8-bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
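The decomposition can be illustrated with a base-256 split: one 8-bit texture carries the coarse range and one the fine range, and the hardware reassembles them at display time with an exposure scale and gamma encoding. This is a simplified sketch of the idea only; the paper's actual encoding, which keeps just two textures resident for any given exposure, may differ in detail.

import numpy as np

def split_hdr(hdr):
    # Decompose a linear HDR image (floats in [0, 65535]) into two
    # 8-bit layers such that hdr ~= hi * 256 + lo.
    q = np.clip(hdr, 0.0, 65535.0)
    hi = np.floor(q / 256.0).astype(np.uint8)
    lo = np.floor(q - hi.astype(np.float32) * 256.0).astype(np.uint8)
    return hi, lo

def reassemble(hi, lo, exposure, gamma=2.2):
    # What the multitexturing stage computes: scale each layer by its
    # weight and the chosen exposure, add, clamp, and gamma-encode.
    linear = (hi.astype(np.float32) * 256.0 + lo) * exposure
    return np.clip(linear, 0.0, 1.0) ** (1.0 / gamma)

Because exposure enters only as a multiplier at reassembly time, it can be varied continuously per frame without re-uploading texture data, which is what enables the simulated auto-exposure application the abstract mentions.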
Waese, Jamie; Debevec, Paul
A Real Time High Dynamic Range Light Probe Proceedings Article
In: SIGGRAPH Technical Sketches, 2001.
@inproceedings{waese_real_2001,
title = {A Real Time High Dynamic Range Light Probe},
author = {Jamie Waese and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Real%20Time%20High%20Dynamic%20Range%20Light%20Probe.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Tchou, Chris; Debevec, Paul
Light Stage 2.0 Proceedings Article
In: SIGGRAPH Technical Sketches, p. 217, 2001.
@inproceedings{hawkins_light_2001,
title = {Light Stage 2.0},
author = {Tim Hawkins and Jonathan Cohen and Chris Tchou and Paul Debevec},
url = {http://ict.usc.edu/pubs/Light%20Stage%202.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {SIGGRAPH Technical Sketches},
pages = {217},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Cohen, Jonathan; Debevec, Paul
A Photometric Approach to Digitizing Cultural Artifacts Proceedings Article
In: Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage, Glyfada, Greece, 2001.
@inproceedings{hawkins_photometric_2001,
title = {A Photometric Approach to Digitizing Cultural Artifacts},
author = {Tim Hawkins and Jonathan Cohen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Photometric%20Approach%20to%20Digitizing%20Cultural%20Artifacts.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of 2nd International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
address = {Glyfada, Greece},
abstract = {In this paper we present a photometry-based approach to the digital documentation of cultural artifacts. Rather than representing an artifact as a geometric model with spatially varying reflectance properties, we instead propose directly representing the artifact in terms of its reflectance field - the manner in which it transforms light into images. The principal device employed in our technique is a computer-controlled lighting apparatus which quickly illuminates an artifact from an exhaustive set of incident illumination directions and a set of digital video cameras which record the artifact's appearance under these forms of illumination. From this database of recorded images, we compute linear combinations of the captured images to synthetically illuminate the object under arbitrary forms of complex incident illumination, correctly capturing the effects of specular reflection, subsurface scattering, self-shadowing, mutual illumination, and complex BRDFs often present in cultural artifacts. We also describe a computer application that allows users to realistically and interactively relight digitized artifacts.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
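Relighting from a reflectance field is linear in the captured data: the image under a novel environment is a weighted sum of the basis photographs, each weighted by the environment's radiance from that photograph's incident direction, times the solid angle that direction covers. A minimal sketch with hypothetical names (relight, env_radiance) and the data laid out as arrays:

import numpy as np

def relight(basis_images, light_dirs, solid_angles, env_radiance):
    # basis_images: (N, H, W, 3) photos, one per incident direction
    #               of the lighting apparatus.
    # light_dirs:   (N, 3) unit incident directions.
    # solid_angles: (N,) solid angle each direction covers, so the
    #               weighted sum approximates an integral over the sphere.
    # env_radiance: callable direction -> linear RGB radiance of the
    #               novel illumination environment.
    out = np.zeros(basis_images.shape[1:], dtype=np.float64)
    for img, d, dw in zip(basis_images, light_dirs, solid_angles):
        out += img * (np.asarray(env_radiance(d)) * dw)
    return out

Because the combination is purely linear, the specular reflection, subsurface scattering, self-shadowing, and mutual illumination recorded in the photographs carry over directly to the relit result, which is what makes the approach well suited to complex artifacts.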