Publications
LeGendre, Chloe; Bladin, Kalle; Kishore, Bipin; Ren, Xinglei; Yu, Xueming; Debevec, Paul
Efficient Multispectral Facial Capture with Monochrome Cameras Proceedings Article
In: ACM SIGGRAPH 2018 Posters on - SIGGRAPH '18, ACM Press, Vancouver, British Columbia, Canada, 2018, ISBN: 978-1-4503-5817-0.
@inproceedings{legendre_efficient_2018,
title = {Efficient Multispectral Facial Capture with Monochrome Cameras},
author = {Chloe LeGendre and Kalle Bladin and Bipin Kishore and Xinglei Ren and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3230744.3230778},
doi = {10.1145/3230744.3230778},
isbn = {978-1-4503-5817-0},
year = {2018},
date = {2018-08-01},
booktitle = {ACM SIGGRAPH 2018 Posters on - SIGGRAPH '18},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {We propose a variant to polarized gradient illumination facial scanning which uses monochrome instead of color cameras to achieve more efficient and higher-resolution results. In typical polarized gradient facial scanning, sub-millimeter geometric detail is acquired by photographing the subject in eight or more polarized spherical gradient lighting conditions made with white LEDs, and RGB cameras are used to acquire color texture maps of the subject's appearance. In our approach, we replace the color cameras and white LEDs with monochrome cameras and multispectral, colored LEDs, leveraging that color images can be formed from successive monochrome images recorded under different illumination colors. While a naive extension of the scanning process to this setup would require multiplying the number of images by number of color channels, we show that the surface detail maps can be estimated directly from monochrome imagery, so that only an additional n photographs are required, where n is the number of added spectral channels. We also introduce a new multispectral optical flow approach to align images across spectral channels in the presence of slight subject motion. Lastly, for the case where a capture system's white light sources are polarized and its multispectral colored LEDs are not, we introduce the technique of multispectral polarization promotion, where we estimate the cross- and parallel-polarized monochrome images for each spectral channel from their corresponding images under a full sphere of even, unpolarized illumination. We demonstrate that this technique allows us to efficiently acquire a full color (or even multispectral) facial scan using monochrome cameras, unpolarized multispectral colored LEDs, and polarized white LEDs.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
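The central efficiency idea in the entry above — forming color images from successive monochrome exposures taken under different LED colors — can be illustrated with a minimal Python sketch. The file names and the one-frame-per-LED-color assumption are hypothetical; the actual pipeline also handles additional spectral channels, multispectral optical flow alignment, and polarization promotion, none of which is shown here.

# Minimal sketch: build a tristimulus image from monochrome frames shot
# under successive red, green, and blue LED illumination. Frames are
# assumed to be already aligned and radiometrically linear.
import numpy as np
import imageio.v2 as imageio

def assemble_rgb(frame_paths):
    # frame_paths: dict mapping 'red'/'green'/'blue' to monochrome image files
    channels = [imageio.imread(frame_paths[c]).astype(np.float32)
                for c in ('red', 'green', 'blue')]
    return np.stack(channels, axis=-1)   # H x W x 3 color image

rgb = assemble_rgb({'red': 'mono_red.png',
                    'green': 'mono_green.png',
                    'blue': 'mono_blue.png'})

In practice a color-calibration matrix would also be applied to map the camera's response under each LED spectrum into the desired output color space.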
Huynh, Loc; Chen, Weikai; Saito, Shunsuke; Xing, Jun; Nagano, Koki; Jones, Andrew; Debevec, Paul; Li, Hao
Mesoscopic Facial Geometry Inference Using Deep Neural Networks Proceedings Article
In: Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition, IEEE, Salt Lake City, UT, 2018.
@inproceedings{huynh_mesoscopic_2018,
title = {Mesoscopic Facial Geometry Inference Using Deep Neural Networks},
author = {Loc Huynh and Weikai Chen and Shunsuke Saito and Jun Xing and Koki Nagano and Andrew Jones and Paul Debevec and Hao Li},
url = {http://openaccess.thecvf.com/content_cvpr_2018/papers/Huynh_Mesoscopic_Facial_Geometry_CVPR_2018_paper.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition},
publisher = {IEEE},
address = {Salt Lake City, UT},
abstract = {We present a learning-based approach for synthesizing facial geometry at medium and fine scales from diffusely-lit facial texture maps. When applied to an image sequence, the synthesized detail is temporally coherent. Unlike current state-of-the-art methods [17, 5], which assume "dark is deep", our model is trained with measured facial detail collected using polarized gradient illumination in a Light Stage [20]. This enables us to produce plausible facial detail across the entire face, including where previous approaches may incorrectly interpret dark features as concavities such as at moles, hair stubble, and occluded pores. Instead of directly inferring 3D geometry, we propose to encode fine details in high-resolution displacement maps which are learned through a hybrid network adopting the state-of-the-art image-to-image translation network [29] and super resolution network [43]. To effectively capture geometric detail at both mid- and high frequencies, we factorize the learning into two separate sub-networks, enabling the full range of facial detail to be modeled. Results from our learning-based approach compare favorably with a high-quality active facial scanning technique, and require only a single passive lighting condition without a complex scanning setup.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
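As a rough illustration of the two-sub-network factorization described in the abstract above, the following PyTorch sketch splits displacement inference into a mid-frequency and a high-frequency stage. The toy convolutional architecture is a placeholder assumption, not the image-to-image translation and super-resolution networks the paper builds on.

# Placeholder two-stage model: one network predicts mid-frequency
# displacement from a diffusely-lit texture map, a second adds
# high-frequency detail; their outputs are summed.
import torch
import torch.nn as nn

class DisplacementNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 1, 3, padding=1))

    def forward(self, texture):
        return self.net(texture)

mid_net, high_net = DisplacementNet(), DisplacementNet()
texture = torch.rand(1, 3, 256, 256)                  # diffusely-lit texture map
displacement = mid_net(texture) + high_net(texture)   # combined displacement map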
LeGendre, Chloe; Huynh, Loc; Wang, Shanhe; Debevec, Paul
Modeling vellus facial hair from asperity scattering silhouettes Proceedings Article
In: Proceedings of SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5008-2.
@inproceedings{legendre_modeling_2017,
title = {Modeling vellus facial hair from asperity scattering silhouettes},
author = {Chloe LeGendre and Loc Huynh and Shanhe Wang and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3084363.3085057},
doi = {10.1145/3084363.3085057},
isbn = {978-1-4503-5008-2},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for modeling the vellus hair over the face based on observations of asperity scattering along a subject's silhouette. We photograph the backlit subject in profile and three-quarters views with a high-resolution DSLR camera to observe the vellus hair on the side and front of the face and separately acquire a 3D scan of the face geometry and texture. We render a library of backlit vellus hair patch samples with different geometric parameters such as density, orientation, and curvature, and we compute image statistics for each set of parameters. We trace the silhouette contour in each face image and straighten the backlit hair silhouettes using image resampling. We compute image statistics for each section of the facial silhouette and determine which set of hair modeling parameters best matches the statistics. We then generate a complete set of vellus hairs for the face by interpolating and extrapolating the matched parameters over the skin. We add the modeled vellus hairs to the 3D facial scan and generate renderings under novel lighting conditions, generally matching the appearance of real photographs.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
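The parameter-matching step in the abstract above lends itself to a short sketch: compute a small statistics vector for a straightened silhouette strip and pick the library entry whose rendered patch has the closest statistics. The specific features (mean, standard deviation, mean gradient magnitude) are illustrative assumptions, not the statistics used in the paper.

# Sketch of matching hair-growth parameters (density, orientation,
# curvature) by nearest image statistics against a rendered library.
import numpy as np

def patch_stats(strip):
    # strip: 2D float array of a straightened backlit silhouette region
    gy, gx = np.gradient(strip)
    return np.array([strip.mean(), strip.std(), np.hypot(gx, gy).mean()])

def match_parameters(strip, library):
    # library: list of (params_dict, rendered_strip) pairs
    target = patch_stats(strip)
    dists = [np.linalg.norm(patch_stats(rendered) - target)
             for _, rendered in library]
    return library[int(np.argmin(dists))][0]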
LeGendre, Chloe; Krissman, David; Debevec, Paul
Improved Chromakey of Hair Strands via Orientation Filter Convolution Proceedings Article
In: Proceedings of SIGGRAPH '17 ACM SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5015-0.
@inproceedings{legendre_improved_2017,
title = {Improved Chromakey of Hair Strands via Orientation Filter Convolution},
author = {Chloe LeGendre and David Krissman and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=3102200},
doi = {10.1145/3102163.3102200},
isbn = {978-1-4503-5015-0},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of SIGGRAPH '17 ACM SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for improving the alpha matting of challenging green-screen video sequences involving hair strands. As hair strands are thin and can be semi-translucent, they are especially hard to separate from a background. However, they appear as extended lines and thus have a strong response when convolved with oriented filters, even in the presence of noise. We leverage this oriented filter response to robustly locate hair strands within each frame of an actor’s performance filmed in front of a green-screen. We demonstrate using production video footage that individual hair fibers excluded from a coarse artist’s matte can be located and then added to the foreground element, qualitatively improving the composite result without added manual labor.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
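The oriented-filter idea above can be sketched with a small Gabor filter bank: thin, line-like hair strands respond strongly to a filter aligned with their direction, so the per-pixel maximum over orientations highlights them. Kernel size and parameters below are illustrative, not production settings.

# Locate line-like strands by maximum response over an oriented filter bank.
import cv2
import numpy as np

def hair_response(gray, n_orientations=16):
    gray = gray.astype(np.float32) / 255.0
    responses = []
    for i in range(n_orientations):
        theta = np.pi * i / n_orientations
        kernel = cv2.getGaborKernel((21, 21), sigma=3.0, theta=theta,
                                    lambd=8.0, gamma=0.5, psi=0.0)
        responses.append(cv2.filter2D(gray, cv2.CV_32F, kernel))
    return np.max(responses, axis=0)   # high along thin, oriented strands

A threshold on this response map could then be used to add recovered strands back into a coarse foreground matte.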
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Efficient Multispectral Reflectance Function Capture for Image-Based Relighting Proceedings Article
In: Proceedings of the Color and Imaging Conference, pp. 47–58, Society for Imaging Science and Technology, San Diego, CA, 2016.
@inproceedings{legendre_efficient_2016,
title = {Efficient Multispectral Reflectance Function Capture for Image-Based Relighting},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://www.ingentaconnect.com/contentone/ist/cic/2016/00002016/00000001/art00008},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the Color and Imaging Conference},
pages = {47–58},
publisher = {Society for Imaging Science and Technology},
address = {San Diego, CA},
abstract = {Image-based relighting (IBRL) renders the appearance of a subject in a novel lighting environment as a linear combination of the images of its reflectance field, the appearance of the subject lit by each incident lighting direction. Traditionally, a tristimulus color camera records the reflectance field as the subject is sequentially illuminated by broad-spectrum white light sources from each direction. Using a multispectral LED sphere and either a tristimulus (RGB) or monochrome camera, we photograph a still life scene to acquire its multispectral reflectance field – its appearance for every lighting direction for multiple incident illumination spectra. For the tristimulus camera, we demonstrate improved color rendition for IBRL when using the multispectral reflectance field, producing a closer match to the scene's actual appearance in a real-world illumination environment. For the monochrome camera, we also show close visual matches. We additionally propose an efficient method for acquiring such multispectral reflectance fields, augmenting the traditional broad-spectrum lighting basis capture with only a few additional images equal to the desired number of spectral channels. In these additional images, we illuminate the subject by a complete sphere of each available narrow-band LED light source, in our case: red, amber, green, cyan, and blue. From the full-sphere illumination images, we promote the white-light reflectance functions for every direction to multispectral, effectively hallucinating the appearance of the subject under each LED spectrum for each lighting direction. We also use polarization imaging to separate the diffuse and specular components of the reflectance functions, spectrally promoting these components according to different models. We validate that the approximated multispectral reflectance functions closely match those generated by a fully multispectral omnidirectional lighting basis, suggesting a rapid multispectral reflectance field capture method which could be applied for live subjects.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
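The first sentence of the abstract above is itself the algorithm: relighting is a weighted sum of the reflectance-field images, with weights sampled from the novel lighting environment. A minimal sketch, assuming the reflectance field is stored as an array indexed by lighting direction:

# Image-based relighting as a linear combination of reflectance-field images.
import numpy as np

def relight(reflectance_field, env_weights):
    # reflectance_field: (n_lights, H, W, 3) images, one per lighting direction
    # env_weights: (n_lights, 3) RGB intensity of the environment at each direction
    return np.einsum('lhwc,lc->hwc', reflectance_field, env_weights)

The paper's contribution lies in how the (multispectral) reflectance field is acquired and spectrally promoted, not in this final summation.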
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{legendre_optimal_2016,
title = {Optimal LED selection for multispectral lighting reproduction},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2945150},
doi = {10.1145/2945078.2945150},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
publisher = {ACM},
address = {New York, NY},
abstract = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
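One way to read the selection problem in the abstract above is as an exhaustive search over 5-of-11 LED subsets, scoring each by how well a non-negative combination of its emission spectra matches a target illuminant. The sketch below covers only the direct spectral-matching criterion; the paper also evaluates material color appearance through camera and observer sensitivities, which is not shown.

# Score every k-of-n LED subset by its non-negative least-squares residual
# against a target illuminant spectrum.
import numpy as np
from itertools import combinations
from scipy.optimize import nnls

def best_subset(led_spectra, target, k=5):
    # led_spectra: (n_leds, n_wavelengths); target: (n_wavelengths,)
    best = None
    for subset in combinations(range(led_spectra.shape[0]), k):
        weights, residual = nnls(led_spectra[list(subset)].T, target)
        if best is None or residual < best[0]:
            best = (residual, subset, weights)
    return best   # (residual, chosen LED indices, drive intensities)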
LeGendre, Chloe; Yu, Xueming; Liu, Dai; Busch, Jay; Jones, Andrew; Pattanaik, Sumanta; Debevec, Paul
Practical Multispectral Lighting Reproduction Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 4, pp. 1–11, 2016, ISSN: 0730-0301.
@article{legendre_practical_2016,
title = {Practical Multispectral Lighting Reproduction},
author = {Chloe LeGendre and Xueming Yu and Dai Liu and Jay Busch and Andrew Jones and Sumanta Pattanaik and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2925934},
doi = {10.1145/2897824.2925934},
issn = {0730-0301},
year = {2016},
date = {2016-07-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {4},
pages = {1–11},
abstract = {We present a practical framework for reproducing omnidirectional incident illumination conditions with complex spectra using a light stage with multispectral LED lights. For lighting acquisition, we augment standard RGB panoramic photography with one or more observations of a color chart with numerous reflectance spectra. We then solve for how to drive the multispectral light sources so that they best reproduce the appearance of the color charts in the original lighting. Even when solving for non-negative intensities, we show that accurate lighting reproduction is achievable using just four or six distinct LED spectra for a wide range of incident illumination spectra. A significant benefit of our approach is that it does not require the use of specialized equipment (other than the light stage) such as monochromators, spectroradiometers, or explicit knowledge of the LED power spectra, camera spectral response functions, or color chart reflectance spectra. We describe two simple devices for multispectral lighting capture, one for slow measurements of detailed angular spectral detail, and one for fast measurements with coarse angular detail. We validate the approach by realistically compositing real subjects into acquired lighting environments, showing accurate matches to how the subject would actually look within the environments, even for those including complex multispectral illumination. We also demonstrate dynamic lighting capture and playback using the technique.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
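The core solve described above can be written compactly as a non-negative least-squares problem on camera observations of the color chart, with no spectral measurements required. The input layout below is an assumption for illustration; in the full system this kind of solve would be applied across the captured lighting directions.

# Solve for non-negative LED intensities so the chart's appearance under the
# light stage best reproduces its appearance in the original environment.
import numpy as np
from scipy.optimize import nnls

def solve_led_intensities(chart_under_leds, chart_in_scene):
    # chart_under_leds: (n_patches * 3, n_led_spectra) stacked RGB values of each
    #                   chart patch lit by each LED spectrum at unit intensity
    # chart_in_scene:   (n_patches * 3,) stacked RGB values of the same patches
    #                   photographed in the original lighting
    intensities, _ = nnls(chart_under_leds, chart_in_scene)
    return intensities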
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18–26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {DTIC, Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
doi = {10.1111/cgf.12837},
issn = {1467-8659},
year = {2016},
date = {2016-05-01},
journal = {Computer Graphics Forum},
abstract = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121–129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {DTIC, Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Jones, Andrew; Hays, Kia; Maio, Heather; Alexander, Oleg; Artstein, Ron; Debevec, Paul; Gainer, Alesia; Georgila, Kallirroi; Haase, Kathleen; Jungblut, Karen; Leuski, Anton; Smith, Stephen; Swartout, William
New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling Book Section
In: Interactive Storytelling, vol. 9445, pp. 269–281, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27035-7 978-3-319-27036-4.
@incollection{traum_new_2015,
title = {New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling},
author = {David Traum and Andrew Jones and Kia Hays and Heather Maio and Oleg Alexander and Ron Artstein and Paul Debevec and Alesia Gainer and Kallirroi Georgila and Kathleen Haase and Karen Jungblut and Anton Leuski and Stephen Smith and William Swartout},
url = {http://link.springer.com/10.1007/978-3-319-27036-4_26},
isbn = {978-3-319-27035-7 978-3-319-27036-4},
year = {2015},
date = {2015-12-01},
booktitle = {Interactive Storytelling},
volume = {9445},
pages = {269–281},
publisher = {Springer International Publishing},
address = {Copenhagen, Denmark},
abstract = {We describe a digital system that allows people to have an interactive conversation with a human storyteller (a Holocaust survivor) who has recorded a number of dialogue contributions, including many compelling narratives of his experiences and thoughts. The goal is to preserve as much as possible of the experience of face-to-face interaction. The survivor's stories, answers to common questions, and testimony are recorded in high fidelity, and then delivered interactively to an audience as responses to spoken questions. People can ask questions and receive answers on a broad range of topics including the survivor's experiences before, after and during the war, his attitudes and philosophy. Evaluation results show that most user questions can be addressed by the system, and that audiences are highly engaged with the resulting interaction.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Graham, Paul; Fyffe, Graham; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1–1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Metallo, Adam; Rossi, Vincent; Blundell, Jonathan; Waibel, Günter; Graham, Paul; Fyffe, Graham; Yu, Xueming; Debevec, Paul
Scanning and printing a 3D portrait of president Barack Obama Proceedings Article
In: SIGGRAPH 2015: Studio, pp. 19, ACM, Los Angeles, CA, 2015.
@inproceedings{metallo_scanning_2015,
title = {Scanning and printing a 3D portrait of president Barack Obama},
author = {Adam Metallo and Vincent Rossi and Jonathan Blundell and Günter Waibel and Paul Graham and Graham Fyffe and Xueming Yu and Paul Debevec},
url = {http://ict.usc.edu/pubs/Scanning%20and%20Printing%20a%203D%20Portrait%20of%20President%20Barack%20Obama.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015: Studio},
pages = {19},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {On June 9th, 2014, we traveled to the State Dining Room of The White House to create a 3D Portrait of President Barack Obama using state-of-the-art 3D scanning and printing technology, producing the modern equivalent of the plaster life masks of President Lincoln from the 1860's.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Xueming; Wang, Shanhe; Busch, Jay; Phan, Thai; McSheery, Tracy; Bolas, Mark; Debevec, Paul
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Posters, pp. 94, ACM, Los Angeles, CA, 2015.
@inproceedings{yu_virtual_2015,
title = {Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking},
author = {Xueming Yu and Shanhe Wang and Jay Busch and Thai Phan and Tracy McSheery and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Posters},
pages = {94},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focussed, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1–1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high-angular density over a wide-field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore "time-offset" interactions with recorded 3D human subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbiç, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 0730-0301.
@article{nagano_skin_2015,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbiç and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {0730-0301},
year = {2015},
date = {2015-07-01},
booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1–10},
address = {Los Angeles, CA},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
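The convolution idea above — stretching smooths the microstructure, compression roughens it — can be approximated with an anisotropic blur plus an unsharp-mask style sharpening. The axis-aligned Gaussian and the scalar amount below are stand-in assumptions for the paper's measured, spatially varying filters.

# Approximate microstructure deformation by filtering a neutral
# displacement map according to local stretch/compression.
import numpy as np
from scipy.ndimage import gaussian_filter

def deform_microstructure(displacement, stretch_u, stretch_v, amount=1.5):
    # displacement: 2D neutral micro-displacement map
    # stretch_u, stretch_v: >1 means stretched along that axis, <1 means compressed
    sigma = (max(stretch_v - 1.0, 0.0) * amount,
             max(stretch_u - 1.0, 0.0) * amount)
    smoothed = gaussian_filter(displacement, sigma=sigma)
    compression = max(1.0 - min(stretch_u, stretch_v), 0.0)
    detail = displacement - gaussian_filter(displacement, sigma=1.0)
    return smoothed + compression * amount * detail   # rougher where compressed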
Fyffe, Graham; Debevec, Paul
Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination Proceedings Article
In: Proceedings of ICCP 2015, pp. 1–10, IEEE, Houston, Texas, 2015.
@inproceedings{fyffe_single-shot_2015,
title = {Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination},
author = {Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Single-Shot%20Reflectance%20Measurement%20from%20Polarized%20Color%20Gradient%20Illumination.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICCP 2015},
pages = {1–10},
publisher = {IEEE},
address = {Houston, Texas},
abstract = {We present a method for acquiring the per-pixel diffuse albedo, specular albedo, and surface normal maps of a subject at a single instant in time. The method is single shot, requiring no optical flow, and per-pixel, making no assumptions regarding albedo statistics or surface connectivity. We photograph the subject inside a spherical illumination device emitting a static lighting pattern of vertically polarized RGB color gradients aligned with the XYZ axes, and horizontally polarized RGB color gradients inversely aligned with the XYZ axes. We capture simultaneous photographs using one of two possible setups: a single view setup using a coaxially aligned camera pair with a polarizing beam splitter, and a multi-view stereo setup with different orientations of linear polarizing filters placed on the cameras, enabling high-quality geometry reconstruction. From this lighting we derive full-color diffuse albedo, single-channel specular albedo suitable for dielectric materials, and polarization-preserving surface normals which are free of corruption from subsurface scattering. We provide simple formulae to estimate the diffuse albedo, specular albedo, and surface normal maps in the single-view and multi-view cases and show error bounds which are small for many common subjects including faces.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
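The kind of simple formulae the abstract refers to can be illustrated with the standard gradient-illumination normal estimate: for each axis, the normalized difference between the image lit by a gradient and the image lit by its inverse gives that normal component. This sketch ignores the polarization-based diffuse/specular separation and the single-shot color multiplexing that are the paper's actual contributions.

# Per-pixel surface normals from gradient and inverse-gradient images.
import numpy as np

def normals_from_gradients(grad, inv_grad, eps=1e-8):
    # grad, inv_grad: dicts of 2D images keyed by 'x', 'y', 'z'
    components = []
    for axis in ('x', 'y', 'z'):
        num = grad[axis] - inv_grad[axis]
        den = grad[axis] + inv_grad[axis] + eps
        components.append(num / den)
    n = np.stack(components, axis=-1)
    return n / (np.linalg.norm(n, axis=-1, keepdims=True) + eps)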
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Building a Life-Size Automultiscopic Display Using Consumer Hardware Proceedings Article
In: Proceedings of GPU Technology Conference, San Jose, CA, 2015.
@inproceedings{jones_building_2015,
title = {Building a Life-Size Automultiscopic Display Using Consumer Hardware},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Building%20a%20Life-Size%20Automultiscopic%20Display%20Using%20Consumer%20Hardware.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of GPU Technology Conference},
address = {San Jose, CA},
abstract = {Automultiscopic displays allow multiple users to experience 3D content without the hassle of special glasses or head gear. Such displays generate many simultaneous images with high-angular density, so that each eye perceives a distinct and different view. This presents a unique challenge for content acquisition and rendering. In this talk, we explain how to build an automultiscopic display using off-the-shelf projectors, video-splitters, and graphics cards. We also present a GPU-based algorithm for rendering a large number of views from a sparse array of video cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134–134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Debevec, Paul
Driving High-Resolution Facial Scans with Video Performance Capture Journal Article
In: ACM Transactions on Graphics (TOG), vol. 34, no. 1, pp. 1– 13, 2014.
@article{fyffe_driving_2014,
title = {Driving High-Resolution Facial Scans with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Scans%20with%20Video%20Performance%20Capture.pdf},
year = {2014},
date = {2014-11-01},
journal = {ACM Transactions on Graphics (TOG)},
volume = {34},
number = {1},
pages = {1– 13},
abstract = {We present a process for rendering a realistic facial performance with control of viewpoint and illumination. The performance is based on one or more high-quality geometry and reflectance scans of an actor in static poses, driven by one or more video streams of a performance. We compute optical flow correspondences between neighboring video frames, and a sparse set of correspondences between static scans and video frames. The latter are made possible by leveraging the relightability of the static 3D scans to match the viewpoint(s) and appearance of the actor in videos taken in arbitrary environments. As optical flow tends to compute proper correspondence for some areas but not others, we also compute a smoothed, per-pixel confidence map for every computed flow, based on normalized cross-correlation. These flows and their confidences yield a set of weighted triangulation constraints among the static poses and the frames of a performance. Given a single artist-prepared face mesh for one static pose, we optimally combine the weighted triangulation constraints, along with a shape regularization term, into a consistent 3D geometry solution over the entire performance that is drift free by construction. In contrast to previous work, even partial correspondences contribute to drift minimization, for example, where a successful match is found in the eye region but not the mouth. Our shape regularization employs a differential shape term based on a spatially varying blend of the differential shapes of the static poses and neighboring dynamic poses, weighted by the associated flow confidences. These weights also permit dynamic reflectance maps to be produced for the performance by blending the static scan maps. Finally, as the geometry and maps are represented on a consistent artist-friendly mesh, we render the resulting high-quality animated face geometry and animated reflectance maps using standard rendering tools.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
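The per-pixel flow confidence described in the abstract above can be sketched as a local normalized cross-correlation between the flow-warped source frame and the target frame; regions where the warp explains the target well get confidence near one. Patch size and the clipping are illustrative assumptions.

# Smoothed per-pixel confidence for an optical-flow field via local NCC.
import numpy as np
from scipy.ndimage import uniform_filter

def ncc_confidence(warped, target, size=7, eps=1e-6):
    # warped: source frame warped by the computed flow; target: frame it should match
    mu_w, mu_t = uniform_filter(warped, size), uniform_filter(target, size)
    dw, dt = warped - mu_w, target - mu_t
    cov = uniform_filter(dw * dt, size)
    var_w = uniform_filter(dw * dw, size)
    var_t = uniform_filter(dt * dt, size)
    ncc = cov / np.sqrt(var_w * var_t + eps)
    return np.clip(ncc, 0.0, 1.0)   # weights for the triangulation constraints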
Filter
2018
LeGendre, Chloe; Bladin, Kalle; Kishore, Bipin; Ren, Xinglei; Yu, Xueming; Debevec, Paul
Efficient Multispectral Facial Capture with Monochrome Cameras Proceedings Article
In: ACM SIGGRAPH 2018 Posters on - SIGGRAPH '18, ACM Press, Vancouver, British Columbia, Canada, 2018, ISBN: 978-1-4503-5817-0.
Abstract | Links | BibTeX | Tags: DTIC, Graphics
@inproceedings{legendre_efficient_2018,
title = {Efficient Multispectral Facial Capture with Monochrome Cameras},
author = {Chloe LeGendre and Kalle Bladin and Bipin Kishore and Xinglei Ren and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3230744.3230778},
doi = {10.1145/3230744.3230778},
isbn = {978-1-4503-5817-0},
year = {2018},
date = {2018-08-01},
booktitle = {ACM SIGGRAPH 2018 Posters on - SIGGRAPH '18},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {We propose a variant to polarized gradient illumination facial scanning which uses monochrome instead of color cameras to achieve more efficient and higher-resolution results. In typical polarized gradient facial scanning, sub-millimeter geometric detail is acquired by photographing the subject in eight or more polarized spherical gradient lighting conditions made with white LEDs, and RGB cameras are used to acquire color texture maps of the subject's appearance. In our approach, we replace the color cameras and white LEDs with monochrome cameras and multispectral, colored LEDs, leveraging that color images can be formed from successive monochrome images recorded under different illumination colors. While a naive extension of the scanning process to this setup would require multiplying the number of images by number of color channels, we show that the surface detail maps can be estimated directly from monochrome imagery, so that only an additional n photographs are required, where n is the number of added spectral channels. We also introduce a new multispectral optical flow approach to align images across spectral channels in the presence of slight subject motion. Lastly, for the case where a capture system's white light sources are polarized and its multispectral colored LEDs are not, we introduce the technique of multispectral polarization promotion, where we estimate the cross- and parallel-polarized monochrome images for each spectral channel from their corresponding images under a full sphere of even, unpolarized illumination. We demonstrate that this technique allows us to efficiently acquire a full color (or even multispectral) facial scan using monochrome cameras, unpolarized multispectral colored LEDs, and polarized white LEDs.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Huynh, Loc; Chen, Weikai; Saito, Shunsuke; Xing, Jun; Nagano, Koki; Jones, Andrew; Debevec, Paul; Li, Hao
Mesoscopic Facial Geometry Inference Using Deep Neural Networks Proceedings Article
In: Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition, IEEE, Salt Lake City, UT, 2018.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, UARC
@inproceedings{huynh_mesoscopic_2018,
title = {Mesoscopic Facial Geometry Inference Using Deep Neural Networks},
author = {Loc Huynh and Weikai Chen and Shunsuke Saito and Jun Xing and Koki Nagano and Andrew Jones and Paul Debevec and Hao Li},
url = {http://openaccess.thecvf.com/content_cvpr_2018/papers/Huynh_Mesoscopic_Facial_Geometry_CVPR_2018_paper.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition},
publisher = {IEEE},
address = {Salt Lake City, UT},
abstract = {We present a learning-based approach for synthesizing facial geometry at medium and fine scales from diffusely-lit facial texture maps. When applied to an image sequence, the synthesized detail is temporally coherent. Unlike current state-of-the-art methods [17, 5], which assume ”dark is deep”, our model is trained with measured facial detail collected using polarized gradient illumination in a Light Stage [20]. This enables us to produce plausible facial detail across the entire face, including where previous approaches may incorrectly interpret dark features as concavities such as at moles, hair stubble, and occluded pores. Instead of directly inferring 3D geometry, we propose to encode fine details in high-resolution displacement maps which are learned through a hybrid network adopting the state-of-the-art image-to-image translation network [29] and super resolution network [43]. To effectively capture geometric detail at both mid- and high frequencies, we factorize the learning into two separate sub-networks, enabling the full range of facial detail to be modeled. Results from our learning-based approach compare favorably with a high-quality active facial scanhening technique, and require only a single passive lighting condition without a complex scanning setup.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
LeGendre, Chloe; Hyunh, Loc; Wang, Shanhe; Debevec, Paul
Modeling vellus facial hair from asperity scattering silhouettes Proceedings Article
In: Proceedings of SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5008-2.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, UARC
@inproceedings{legendre_modeling_2017,
title = {Modeling vellus facial hair from asperity scattering silhouettes},
author = {Chloe LeGendre and Loc Hyunh and Shanhe Wang and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3084363.3085057},
doi = {10.1145/3084363.3085057},
isbn = {978-1-4503-5008-2},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for modeling the vellus hair over the face based on observations of asperity scattering along a subject's silhouette. We photograph the backlit subject in profile and three-quarters views with a high-resolution DSLR camera to observe the vellus hair on the side and front of the face and separately acquire a 3D scan of the face geometry and texture. We render a library of backlit vellus hair patch samples with different geometric parameters such as density, orientation, and curvature, and we compute image statistics for each set of parameters. We trace the silhouette contour in each face image and straighten the backlit hair silhouettes using image resampling. We compute image statistics for each section of the facial silhouette and determine which set of hair modeling parameters best matches the statistics. We then generate a complete set of vellus hairs for the face by interpolating and extrapolating the matched parameters over the skin. We add the modeled vellus hairs to the 3D facial scan and generate renderings under novel lighting conditions, generally matching the appearance of real photographs.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Krissman, David; Debevec, Paul
Improved Chromakey of Hair Strands via Orientation Filter Convolution Proceedings Article
In: Proceeding of SIGGRAPH '17 ACM SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5015-0.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, UARC
@inproceedings{legendre_improved_2017,
title = {Improved Chromakey of Hair Strands via Orientation Filter Convolution},
author = {Chloe LeGendre and David Krissman and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=3102200},
doi = {10.1145/3102163.3102200},
isbn = {978-1-4503-5015-0},
year = {2017},
date = {2017-07-01},
booktitle = {Proceeding of SIGGRAPH '17 ACM SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for improving the alpha maing of challenging green-screen video sequences involving hair strands. As hair strands are thin and can be semi-translucent, they are especially hard to separate from a background. However, they appear as extended lines and thus have a strong response when convolved with oriented filters, even in the presence of noise. We leverage this oriented filter response to robustly locate hair strands within each frame of an actor’s performance filmed in front of a green-screen. We demonstrate using production video footage that individual hair fibers excluded from a coarse artist’s matte can be located and then added to the foreground element, qualitatively improving the composite result without added manual labor.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Efficient Multispectral Reflectance Function Capture for Image-Based Relighting Proceedings Article
In: Proceedings of the Color and Imaging Conference, pp. 47–58, Society for Imaging Science and Technology, San Diego, CA, 2016.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, UARC
@inproceedings{legendre_efficient_2016,
title = {Efficient Multispectral Reflectance Function Capture for Image-Based Relighting},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://www.ingentaconnect.com/contentone/ist/cic/2016/00002016/00000001/art00008},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the Color and Imaging Conference},
pages = {47–58},
publisher = {Society for Imaging Science and Technology},
address = {San Diego, CA},
abstract = {Image-based relighting (IBRL) renders the appearance of a subject in a novel lighting environment as a linear combination of the images of its reflectance field , the appearance of the subject lit by each incident lighting direction. Traditionally, a tristimulus color camera records the reflectance field as the subject is sequentially illuminated by broad-spectrum white light sources from each direction. Using a multispectral LED sphere and either a tristimulus (RGB) or monochrome camera, we photograph a still life scene to acquire its multispectral reflectance field – its appearance for every lighting direction for multiple incident illumination spectra. For the tristimulus camera, we demonstrate improved color rendition for IBRL when using the multispectral reflectance field, producing a closer match to the scene's actual appearance in a real-world illumination environment. For the monochrome camera, we also show close visual matches. We additionally propose an efficient method for acquiring such multispectral reflectance fields, augmenting the traditional broad-spectrum lighting basis capture with only a few additional images equal to the desired number of spectral channels. In these additional images, we illuminate the subject by a complete sphere of each available narrow-band LED light source, in our case: red, amber, green, cyan, and blue. From the full-sphere illumination images, we promote the white-light reflectance functions for every direction to multispectral, effectively hallucinating the appearance of the subject under each LED spectrum for each lighting direction. We also use polarization imaging to separate the diffuse and specular components of the reflectance functions, spectrally promoting these components according to different models. We validate that the approximated multispectral reflectance functions closely match those generated by a fully multispectral omnidirectional lighting basis, suggesting a rapid multispectral reflectance field capture method which could be applied for live subjects.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, UARC
@inproceedings{legendre_optimal_2016,
title = {Optimal LED selection for multispectral lighting reproduction},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2945150},
doi = {10.1145/2945078.2945150},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
publisher = {ACM},
address = {New York, NY},
abstract = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Liu, Dai; Busch, Jay; Jones, Andrew; Pattanaik, Sumanta; Debevec, Paul
Practical Multispectral Lighting Reproduction Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 4, pp. 1–11, 2016, ISSN: 07300301.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, UARC
@article{legendre_practical_2016,
title = {Practical Multispectral Lighting Reproduction},
author = {Chloe LeGendre and Xueming Yu and Dai Liu and Jay Busch and Andrew Jones and Sumanta Pattanaik and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2925934},
doi = {10.1145/2897824.2925934},
issn = {07300301},
year = {2016},
date = {2016-07-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {4},
pages = {1–11},
abstract = {We present a practical framework for reproducing omnidirectional incident illumination conditions with complex spectra using a light stage with multispectral LED lights. For lighting acquisition, we augment standard RGB panoramic photography with one or more observations of a color chart with numerous reflectance spectra. We then solve for how to drive the multispectral light sources so that they best reproduce the appearance of the color charts in the original lighting. Even when solving for non-negative intensities, we show that accurate lighting reproduction is achievable using just four or six distinct LED spectra for a wide range of incident illumination spectra. A significant benefit of our approach is that it does not require the use of specialized equipment (other than the light stage) such as monochromators, spectroradiometers, or explicit knowledge of the LED power spectra, camera spectral response functions, or color chart reflectance spectra. We describe two simple devices for multispectral lighting capture, one for slow measurements of detailed angular spectral detail, and one for fast measurements with coarse angular detail. We validate the approach by realistically compositing real subjects into acquired lighting environments, showing accurate matches to how the subject would actually look within the environments, even for those including complex multispectral illumination. We also demonstrate dynamic lighting capture and playback using the technique.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, MxR, UARC
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18–26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {DTIC, Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
doi = {10.1111/cgf.12837},
issn = {1467-8659},
year = {2016},
date = {2016-05-01},
journal = {Computer Graphics Forum},
abstract = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121–129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {DTIC, Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Traum, David; Jones, Andrew; Hays, Kia; Maio, Heather; Alexander, Oleg; Artstein, Ron; Debevec, Paul; Gainer, Alesia; Georgila, Kallirroi; Haase, Kathleen; Jungblut, Karen; Leuski, Anton; Smith, Stephen; Swartout, William
New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling Book Section
In: Interactive Storytelling, vol. 9445, pp. 269–281, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27035-7 978-3-319-27036-4.
@incollection{traum_new_2015,
title = {New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling},
author = {David Traum and Andrew Jones and Kia Hays and Heather Maio and Oleg Alexander and Ron Artstein and Paul Debevec and Alesia Gainer and Kallirroi Georgila and Kathleen Haase and Karen Jungblut and Anton Leuski and Stephen Smith and William Swartout},
url = {http://link.springer.com/10.1007/978-3-319-27036-4_26},
isbn = {978-3-319-27035-7 978-3-319-27036-4},
year = {2015},
date = {2015-12-01},
booktitle = {Interactive Storytelling},
volume = {9445},
pages = {269–281},
publisher = {Springer International Publishing},
address = {Copenhagen, Denmark},
abstract = {We describe a digital system that allows people to have an interactive conversation with a human storyteller (a Holocaust survivor) who has recorded a number of dialogue contributions, including many compelling narratives of his experiences and thoughts. The goal is to preserve as much as possible of the experience of face-to-face interaction. The survivor's stories, answers to common questions, and testimony are recorded in high fidelity, and then delivered interactively to an audience as responses to spoken questions. People can ask questions and receive answers on a broad range of topics including the survivor's experiences before, after and during the war, his attitudes and philosophy. Evaluation results show that most user questions can be addressed by the system, and that audiences are highly engaged with the resulting interaction.},
keywords = {DTIC, Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Graham, Paul; Fyffe, Graham; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1–1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Metallo, Adam; Rossi, Vincent; Blundell, Jonathan; Waibel, Günter; Graham, Paul; Fyffe, Graham; Yu, Xueming; Debevec, Paul
Scanning and printing a 3D portrait of president Barack Obama Proceedings Article
In: SIGGRAPH 2015: Studio, pp. 19, ACM, Los Angeles, CA, 2015.
@inproceedings{metallo_scanning_2015,
title = {Scanning and printing a 3D portrait of president Barack Obama},
author = {Adam Metallo and Vincent Rossi and Jonathan Blundell and Günter Waibel and Paul Graham and Graham Fyffe and Xueming Yu and Paul Debevec},
url = {http://ict.usc.edu/pubs/Scanning%20and%20Printing%20a%203D%20Portrait%20of%20President%20Barack%20Obama.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015: Studio},
pages = {19},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {On June 9th, 2014, we traveled to the State Dining Room of The White House to create a 3D Portrait of President Barack Obama using state-of-the-art 3D scanning and printing technology, producing the modern equivalent of the plaster life masks of President Lincoln from the 1860's.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Xueming; Wang, Shanhe; Busch, Jay; Phan, Thai; McSheery, Tracy; Bolas, Mark; Debevec, Paul
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Posters, pp. 94, ACM, Los Angeles, CA, 2015.
@inproceedings{yu_virtual_2015,
title = {Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking},
author = {Xueming Yu and Shanhe Wang and Jay Busch and Thai Phan and Tracy McSheery and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Posters},
pages = {94},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focused, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
keywords = {DTIC, Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1–1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high angular density over a wide field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore “time-offset” interactions with recorded 3D human subjects.},
keywords = {DTIC, Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbič, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 07300301.
@article{nagano_skin_2015,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbič and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {07300301},
year = {2015},
date = {2015-07-01},
booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1–10},
address = {Los Angeles, CA},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
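The core operation summarized in the abstract above can be sketched compactly: blur a neutral microstructure displacement map where the skin is stretched, and sharpen it where compressed, with the strength driven by the measured deformation. The Python fragment below is a minimal illustration under assumed names (deform_microstructure, a single-scale blur, a scalar gain); it is not the paper's GPU implementation, which uses spatially varying filters fit to the measured normal distributions.

import numpy as np
from scipy.ndimage import gaussian_filter

def deform_microstructure(displacement, stretch, sigma=1.0, gain=0.8):
    # Illustrative sketch: 'displacement' is a 2D microstructure displacement map,
    # 'stretch' a same-shaped map of local stretch (>1 stretched, <1 compressed).
    blurred = gaussian_filter(displacement, sigma)
    detail = displacement - blurred                 # high-frequency microstructure
    amount = 1.0 - gain * (stretch - 1.0)           # attenuate detail under stretch, boost under compression
    return blurred + np.clip(amount, 0.0, 2.0) * detail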
Fyffe, Graham; Debevec, Paul
Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination Proceedings Article
In: Proceedings of ICCP 2015, pp. 1–10, IEEE, Houston, Texas, 2015.
@inproceedings{fyffe_single-shot_2015,
title = {Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination},
author = {Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Single-Shot%20Reflectance%20Measurement%20from%20Polarized%20Color%20Gradient%20Illumination.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICCP 2015},
pages = {1–10},
publisher = {IEEE},
address = {Houston, Texas},
abstract = {We present a method for acquiring the per-pixel diffuse albedo, specular albedo, and surface normal maps of a subject at a single instant in time. The method is single shot, requiring no optical flow, and per-pixel, making no assumptions regarding albedo statistics or surface connectivity. We photograph the subject inside a spherical illumination device emitting a static lighting pattern of vertically polarized RGB color gradients aligned with the XYZ axes, and horizontally polarized RGB color gradients inversely aligned with the XYZ axes. We capture simultaneous photographs using one of two possible setups: a single view setup using a coaxially aligned camera pair with a polarizing beam splitter, and a multi-view stereo setup with different orientations of linear polarizing filters placed on the cameras, enabling high-quality geometry reconstruction. From this lighting we derive full-color diffuse albedo, single-channel specular albedo suitable for dielectric materials, and polarization-preserving surface normals which are free of corruption from subsurface scattering. We provide simple formulae to estimate the diffuse albedo, specular albedo, and surface normal maps in the single-view and multi-view cases and show error bounds which are small for many common subjects including faces.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
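As a rough illustration of the gradient-illumination principle the abstract above relies on (not the paper's exact single-shot formulae, which additionally exploit polarization and color to separate diffuse from specular), the surface normal of a Lambertian-like surface can be recovered from the per-pixel responses under a spherical gradient and its inverse. The function name and array layout below are assumptions for illustration only.

import numpy as np

def gradient_normals(grad_pos, grad_neg, eps=1e-8):
    # grad_pos[..., i] and grad_neg[..., i]: per-pixel responses under the spherical
    # gradient aligned / inversely aligned with axis i (x, y, z).
    # The normalized difference of each gradient pair points along the surface normal.
    n = (grad_pos - grad_neg) / (grad_pos + grad_neg + eps)
    return n / (np.linalg.norm(n, axis=-1, keepdims=True) + eps)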
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Building a Life-Size Automultiscopic Display Using Consumer Hardware Proceedings Article
In: Proceedings of GPU Technology Conference, San Jose, CA, 2015.
@inproceedings{jones_building_2015,
title = {Building a Life-Size Automultiscopic Display Using Consumer Hardware},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Building%20a%20Life-Size%20Automultiscopic%20Display%20Using%20Consumer%20Hardware.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of GPU Technology Conference},
address = {San Jose, CA},
abstract = {Automultiscopic displays allow multiple users to experience 3D content without the hassle of special glasses or head gear. Such displays generate many simultaneous images with high angular density, so that each eye perceives a distinct and different view. This presents a unique challenge for content acquisition and rendering. In this talk, we explain how to build an automultiscopic display using off-the-shelf projectors, video-splitters, and graphics cards. We also present a GPU-based algorithm for rendering a large number of views from a sparse array of video cameras.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134–134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {DTIC, Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2014
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Debevec, Paul
Driving High-Resolution Facial Scans with Video Performance Capture Journal Article
In: ACM Transactions on Graphics (TOG), vol. 34, no. 1, pp. 1–13, 2014.
@article{fyffe_driving_2014,
title = {Driving High-Resolution Facial Scans with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Scans%20with%20Video%20Performance%20Capture.pdf},
year = {2014},
date = {2014-11-01},
journal = {ACM Transactions on Graphics (TOG)},
volume = {34},
number = {1},
pages = {1–13},
abstract = {We present a process for rendering a realistic facial performance with control of viewpoint and illumination. The performance is based on one or more high-quality geometry and reflectance scans of an actor in static poses, driven by one or more video streams of a performance. We compute optical flow correspondences between neighboring video frames, and a sparse set of correspondences between static scans and video frames. The latter are made possible by leveraging the relightability of the static 3D scans to match the viewpoint(s) and appearance of the actor in videos taken in arbitrary environments. As optical flow tends to compute proper correspondence for some areas but not others, we also compute a smoothed, per-pixel confidence map for every computed flow, based on normalized cross-correlation. These flows and their confidences yield a set of weighted triangulation constraints among the static poses and the frames of a performance. Given a single artist-prepared face mesh for one static pose, we optimally combine the weighted triangulation constraints, along with a shape regularization term, into a consistent 3D geometry solution over the entire performance that is drift free by construction. In contrast to previous work, even partial correspondences contribute to drift minimization, for example, where a successful match is found in the eye region but not the mouth. Our shape regularization employs a differential shape term based on a spatially varying blend of the differential shapes of the static poses and neighboring dynamic poses, weighted by the associated flow confidences. These weights also permit dynamic reflectance maps to be produced for the performance by blending the static scan maps. Finally, as the geometry and maps are represented on a consistent artist-friendly mesh, we render the resulting high-quality animated face geometry and animated reflectance maps using standard rendering tools.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
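One step described in the abstract above lends itself to a short sketch: the per-pixel flow confidence based on normalized cross-correlation. The helper below (hypothetical names; windowed NCC between a frame and its flow-warped neighbor) conveys the idea, though the paper's smoothing and weighting details differ.

import numpy as np
from scipy.ndimage import uniform_filter

def ncc_confidence(a, b, window=7, eps=1e-6):
    # 'a' is a grayscale frame; 'b' is the neighboring frame already warped into a's
    # coordinates by the computed flow; returns a per-pixel confidence in [-1, 1].
    mean_a, mean_b = uniform_filter(a, window), uniform_filter(b, window)
    var_a = uniform_filter(a * a, window) - mean_a ** 2
    var_b = uniform_filter(b * b, window) - mean_b ** 2
    cov = uniform_filter(a * b, window) - mean_a * mean_b
    return cov / np.sqrt(np.maximum(var_a * var_b, eps))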
Nagano, Koki; Alexander, Oleg; Barbic, Jernej; Debevec, Paul
Measurement and Modeling of Microfacet Distributions under Deformation Proceedings Article
In: ACM SIGGRAPH 2014 Talks, ACM, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2960-6.
@inproceedings{nagano_measurement_2014,
title = {Measurement and Modeling of Microfacet Distributions under Deformation},
author = {Koki Nagano and Oleg Alexander and Jernej Barbic and Paul Debevec},
url = {http://ict.usc.edu/pubs/Measurement%20and%20Modeling%20of%20Microfacet%20Distribution%20under%20Deformation%20(abstract%20for%20talk).pdf},
doi = {10.1145/2614106.2614124},
isbn = {978-1-4503-2960-6},
year = {2014},
date = {2014-08-01},
booktitle = {ACM SIGGRAPH 2014 Talks},
publisher = {ACM},
address = {Vancouver, British Columbia, Canada},
abstract = {We endeavor to model dynamic microfacet distributions of rough surfaces such as skin to simulate the changes in surface BRDF under stretching and compression. We begin by measuring microfacet distributions at 5-micron scale of several surface patches under controlled deformation. Generally speaking, rough surfaces become flatter and thus shinier as they are pulled tighter, and become rougher under compression. From this data, we build a model of how surface reflectance changes as the material deforms. We then simulate dynamic surface reflectance by modifying the anisotropic roughness parameters of a microfacet distribution model in accordance with animated surface deformations. Furthermore, we directly render such dynamic appearance by driving dynamic micro geometries to demonstrate how they influence the meso-scale surface reflectance.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man” Proceedings Article
In: SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques, Vancouver, Canada, 2014.
@inproceedings{jones_creating_2014,
title = {Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man”},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Creating%20a%20life-sized%20automulitscopic%20Morgan%20Spurlock%20for%20CNNs%20%e2%80%9cInside%20Man%e2%80%9d%20(abstract).pdf},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Vancouver, Canada},
abstract = {We present a system for capturing and rendering life-size 3D human subjects on an automultiscopic display. Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
von der Pahlen, Javier; Jimenez, Jorge; Danvoye, Etienne; Debevec, Paul; Fyffe, Graham; Alexander, Oleg
Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters Proceedings Article
In: SIGGRAPH '14 ACM SIGGRAPH 2014 Courses, pp. 1–384, ACM Press, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2962-0.
@inproceedings{von_der_pahlen_digital_2014,
title = {Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters},
author = {Javier von der Pahlen and Jorge Jimenez and Etienne Danvoye and Paul Debevec and Graham Fyffe and Oleg Alexander},
url = {http://ict.usc.edu/pubs/Digial%20Ira%20and%20Beyond%20-%20Creating%20Photoreal%20Real-Time%20Digital%20Characters%20(course%20notes).pdf},
doi = {10.1145/2614028.2615407},
isbn = {978-1-4503-2962-0},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH '14 ACM SIGGRAPH 2014 Courses},
pages = {1–384},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {This course explains a complete process for creating next-generation realtime digital human characters, using the Digital Ira collaboration between USC ICT and Activision as an example, covering high-resolution facial scanning, blendshape rigging, video-based performance capture, animation compression, realtime skin and eye shading, hair, latest results, and future directions.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {DTIC, Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
Artstein, Ron; Smith, Stephen; Traum, David; Alexander, Oleg; Leuski, Anton; Jones, Andrew; Georgila, Kallirroi; Debevec, Paul; Swartout, William; Maio, Heather
Time-offset Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of IUI 2014, pp. 163–168, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2184-6.
@inproceedings{artstein_time-offset_2014,
title = {Time-offset Interaction with a Holocaust Survivor},
author = {Ron Artstein and Stephen Smith and David Traum and Oleg Alexander and Anton Leuski and Andrew Jones and Kallirroi Georgila and Paul Debevec and William Swartout and Heather Maio},
url = {http://ict.usc.edu/pubs/Time-Offset%20Interaction%20with%20a%20Holocaust%20Survivor.pdf},
doi = {10.1145/2557500.2557540},
isbn = {978-1-4503-2184-6},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of IUI 2014},
pages = {163–168},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {Time-offset interaction is a new technology that allows for two-way communication with a person who is not available for conversation in real time: a large set of statements are prepared in advance, and users access these statements through natural conversation that mimics face-to-face interaction. Conversational reactions to user questions are retrieved through a statistical classifier, using technology that is similar to previous interactive systems with synthetic characters; however, all of the retrieved utterances are genuine statements by a real person. Recordings of answers, listening and idle behaviors, and blending techniques are used to create a persistent visual image of the person throughout the interaction. A proof-of-concept has been implemented using the likeness of Pinchas Gutter, a Holocaust survivor, enabling short conversations about his family, his religious views, and resistance. This proof-of-concept has been shown to dozens of people, from school children to Holocaust scholars, with many commenting on the impact of the experience and potential for this kind of interface.},
keywords = {DTIC, Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2013
Nagano, Koki; Jones, Andrew; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
An Autostereoscopic Projector Array Optimized for 3D Facial Display Proceedings Article
In: SIGGRAPH 2013 Emerging Technologies, 2013.
@inproceedings{nagano_autostereoscopic_2013,
title = {An Autostereoscopic Projector Array Optimized for 3D Facial Display},
author = {Koki Nagano and Andrew Jones and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Autostereoscopic%20Projector%20Array%20Optimized%20for%203D%20Facial%20Display%20.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Emerging Technologies},
abstract = {Video projectors are rapidly shrinking in size, power consumption, and cost. Such projectors provide unprecedented flexibility to stack, arrange, and aim pixels without the need for moving parts. This dense projector display is optimized in size and resolution to display an autostereoscopic life-sized 3D human face. It utilizes 72 Texas Instruments PICO projectors to illuminate a 30 cm x 30 cm anisotropic screen with a wide 110-degree field of view. The demonstration includes both live scanning of subjects and virtual animated characters.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Graham, Paul; Nagano, Koki; Busch, Jay; Debevec, Paul
Driving High-Resolution Facial Blendshapes with Video Performance Capture Proceedings Article
In: SIGGRAPH, Anaheim, CA, 2013.
@inproceedings{fyffe_driving_2013,
title = {Driving High-Resolution Facial Blendshapes with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Graham and Koki Nagano and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Blendshapes%20with%20Video%20Performance.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH},
address = {Anaheim, CA},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Alexander, Oleg; Busch, Jay; Graham, Paul; Tunwattanapong, Borom; Jones, Andrew; Nagano, Koki; Ichikari, Ryosuke; Debevec, Paul; Fyffe, Graham
Digital Ira: High-Resolution Facial Performance Playback Proceedings Article
In: SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques, Anaheim, CA, 2013.
@inproceedings{alexander_digital_2013,
title = {Digital Ira: High-Resolution Facial Performance Playback},
author = {Oleg Alexander and Jay Busch and Paul Graham and Borom Tunwattanapong and Andrew Jones and Koki Nagano and Ryosuke Ichikari and Paul Debevec and Graham Fyffe},
url = {http://gl.ict.usc.edu/Research/DigitalIra/},
doi = {10.1145/2503385.2503387},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Anaheim, CA},
abstract = {In this collaboration between Activision and USC ICT, we tried to create a real-time, photoreal digital human character which could be seen from any viewpoint, any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we needed this to run in a game-ready production pipeline. To achieve this, we scanned the actor in thirty high-resolution expressions using the USC ICT's new Light Stage X system [Ghosh et al. SIGGRAPHAsia2011] and chose eight expressions for the real-time performance rendering. To record the performance, we shot multi-view 30fps video of the actor performing improvised lines using the same multi-camera rig. We used a new tool called Vuvuzela to interactively and precisely correspond all expression (u,v)'s to the neutral expression, which was retopologized to an artist mesh. Our new offline animation solver works by creating a performance graph representing dense GPU optical flow between the video frames and the eight expressions. This graph gets pruned by analyzing the correlation between the video frames and the expression scans over twelve facial regions. The algorithm then computes dense optical flow and 3D triangulation yielding per-frame spatially varying blendshape weights approximating the performance.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Tunwattanapong, Borom; Fyffe, Graham; Graham, Paul; Busch, Jay; Yu, Xueming; Ghosh, Abhijeet; Debevec, Paul
Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination Journal Article
In: ACM Transactions on Graphics, vol. 32, no. 4, 2013, ISSN: 07300301.
@article{tunwattanapong_acquiring_2013,
title = {Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination},
author = {Borom Tunwattanapong and Graham Fyffe and Paul Graham and Jay Busch and Xueming Yu and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Acquiring%20Re%ef%ac%82ectance%20and%20Shape%20from%20Continuous%20Spherical%20Harmonic%20Illumination.pdf},
doi = {10.1145/2461912.2461944},
issn = {07300301},
year = {2013},
date = {2013-07-01},
journal = {ACM Transactions on Graphics},
volume = {32},
number = {4},
abstract = {We present a novel technique for acquiring the geometry and spatially-varying reflectance properties of 3D objects by observing them under continuous spherical harmonic illumination conditions. The technique is general enough to characterize either entirely specular or entirely diffuse materials, or any varying combination across the surface of the object. We employ a novel computational illumination setup consisting of a rotating arc of controllable LEDs which sweep out programmable spheres of incident illumination during 1-second exposures. We illuminate the object with a succession of spherical harmonic illumination conditions, as well as photographed environmental lighting for validation. From the response of the object to the harmonics, we can separate diffuse and specular reflections, estimate world-space diffuse and specular normals, and compute anisotropic roughness parameters for each view of the object. We then use the maps of both diffuse and specular reflectance to form correspondences in a multiview stereo algorithm, which allows even highly specular surfaces to be corresponded across views. The algorithm yields a complete 3D model and a set of merged reflectance maps. We use this technique to digitize the shape and reflectance of a variety of objects difficult to acquire with other techniques and present validation renderings which match well to photographs in similar lighting.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Alexander, Oleg; Fyffe, Graham; Busch, Jay; Yu, Xueming; Ichikari, Ryosuke; Jones, Andrew; Debevec, Paul; Jimenez, Jorge; Danvoye, Etienne; Antoniazzi, Bernardo; Eheler, Mike; Kysela, Zybnek; von der Pahlen, Javier
Digital Ira: Creating a Real-Time Photoreal Digital Actor Proceedings Article
In: SIGGRAPH Real Time Live!, Anaheim, CA, 2013, ISBN: 978-1-4503-2342-0.
@inproceedings{alexander_digital_2013-1,
title = {Digital Ira: Creating a Real-Time Photoreal Digital Actor},
author = {Oleg Alexander and Graham Fyffe and Jay Busch and Xueming Yu and Ryosuke Ichikari and Andrew Jones and Paul Debevec and Jorge Jimenez and Etienne Danvoye and Bernardo Antoniazzi and Mike Eheler and Zybnek Kysela and Javier von der Pahlen},
url = {http://dl.acm.org/citation.cfm?doid=2503385.2503387},
doi = {10.1145/2503385.2503387},
isbn = {978-1-4503-2342-0},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH Real Time Live!},
address = {Anaheim, CA},
abstract = {In 2008, the "Digital Emily" project [Alexander et al. 2009] showed how a set of high-resolution facial expressions scanned in a light stage could be rigged into a real-time photoreal digital character and driven with video-based facial animation techniques. However, Digital Emily was rendered offline, involved just the front of the face, and was never seen in a tight closeup. In this collaboration between Activision and USC ICT shown at SIGGRAPH 2013's Real-Time Live venue, we endeavoured to create a real-time, photoreal digital human character which could be seen from any viewpoint, in any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we wanted this to run in a real-time game-ready production pipeline, ultimately achieving 180 frames per second for a full-screen character on a two-year old graphics card.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: EUROGRAPHICS, Girona, Spain, 2013.
@inproceedings{graham_measurement-based_2013,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2013},
date = {2013-05-01},
booktitle = {EUROGRAPHICS},
address = {Girona, Spain},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron-scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10-micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2012
Debevec, Paul
The Light Stages and Their Applications to Photoreal Digital Actors Proceedings Article
In: SIGGRAPH Asia, Singapore, 2012.
@inproceedings{debevec_light_2012,
title = {The Light Stages and Their Applications to Photoreal Digital Actors},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/The%20Light%20Stages%20and%20Their%20Applications%20to%20Photoreal%20Digital%20Actors.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {SIGGRAPH Asia},
address = {Singapore},
abstract = {The Light Stage systems built at UC Berkeley and USC ICT have enabled a variety of facial scanning and reflectance measurement techniques that have been explored in several research papers and used in various commercial applications. This short paper presents the evolutionary history of the Light Stage Systems and some of the techniques and applications they have enabled.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2012, 2012.
@techreport{graham_measurement-based_2012,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2012.pdf},
year = {2012},
date = {2012-11-01},
number = {ICT TR 01 2012},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a technique for generating microstructure-level facial geometry by augmenting a mesostructure-level facial scan with detail synthesized from a set of exemplar skin patches scanned at much higher resolution. We use constrained texture synthesis based on image analogies to increase the resolution of the facial scan in a way that is consistent with the scanned mesostructure. We digitize the exemplar patches with a polarization-based computational illumination technique which considers specular reflection and single scattering. The recorded microstructure patches can be used to synthesize full-facial microstructure detail for either the same subject or a different subject. We show that the technique allows for greater realism in facial renderings including more accurate reproduction of skin’s specular roughness and anisotropic reflection effects.},
keywords = {DTIC, Graphics, UARC},
pubstate = {published},
tppubtype = {techreport}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: ACM SIGGRAPH 2012 Talks (SIGGRAPH '12), Los Angeles, CA, 2012.
@inproceedings{graham_measurement-based_2012-1,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/A%20Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks (SIGGRAPH '12)},
address = {Los Angeles, CA},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron-scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10-micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Yufeng; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), 2012.
@inproceedings{zhu_estimating_2012,
title = {Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination},
author = {Yufeng Zhu and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Estimating%20Diffusion%20Parameters%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
abstract = {Accurately modeling and reproducing the appearance of real-world materials is crucial for the production of photoreal imagery of digital scenes and subjects. The appearance of many common materials is the result of subsurface light transport that gives rise to the characteristic “soft” appearance and the unique coloring of such materials. Jensen et al. [2001] introduced the dipole-diffusion approximation to efficiently model isotropic subsurface light transport. The scattering parameters needed to drive the dipole-diffusion approximation are typically estimated by illuminating a homogeneous surface patch with a collimated beam of light, or in the case of spatially varying translucent materials with a dense set of structured light patterns. A disadvantage of most existing techniques is that acquisition time is traded off with spatial density of the scattering parameters.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Graham, Paul; Busch, Jay; Bolas, Mark
A Cell Phone Based Platform for Facial Performance Capture Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
@inproceedings{debevec_cell_2012,
title = {A Cell Phone Based Platform for Facial Performance Capture},
author = {Paul Debevec and Paul Graham and Jay Busch and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Cell%20Phone%20Based%20Platform%20for%20Facial%20Performance%20Capture.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
keywords = {DTIC, Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Busch, Jay; Bolas, Mark; Debevec, Paul
A Single-Shot Light Probe Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
@inproceedings{graham_single-shot_2012,
title = {A Single-Shot Light Probe},
author = {Paul Graham and Jay Busch and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Single-Shot%20Light%20Probe.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
abstract = {We demonstrate a novel light probe which can estimate the full dynamic range of a scene with multiple bright light sources. It places diffuse strips between mirrored spherical quadrants, effectively co-locating diffuse and mirrored probes to record the full dynamic range of illumination in a single exposure. From this image, we estimate the intensity of multiple saturated light sources by solving a linear system.},
keywords = {DTIC, Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Meseth, Jan; Hempel, Shawn; Weidlich, Andrea; Fyffe, Graham; Miller, Craig; Carroll, Paul; Debevec, Paul; Fyffe, Lynn
Improved Linear-Light-Source Material Reflectance Scanning Proceedings Article
In: ACM SIGGRAPH 2012 Talks, 2012.
@inproceedings{meseth_improved_2012,
title = {Improved Linear-Light-Source Material Reflectance Scanning},
author = {Jan Meseth and Shawn Hempel and Andrea Weidlich and Graham Fyffe and Craig Miller and Paul Carroll and Paul Debevec and Lynn Fyffe},
url = {http://ict.usc.edu/pubs/Improved%20Linear-Light-Source%20Material%20Reflectance%20Scanning.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks},
abstract = {We improve the resolution, accuracy, and efficiency of Linear Light Source (LLS) Reflectometry with several acquisition setup and data processing improvements, allowing spatially-varying reflectance parameters of complex materials to be recorded with unprecedented accuracy and efficiency.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Jurik, Joel; Burnett, Thomas; Klug, Michael; Debevec, Paul
Geometry-Corrected Light Field Rendering for Creating a Holographic Stereogram Proceedings Article
In: CVPR Workshop for Computational Cameras and Displays, Providence, RI, 2012.
@inproceedings{jurik_geometry-corrected_2012,
title = {Geometry-Corrected Light Field Rendering for Creating a Holographic Stereogram},
author = {Joel Jurik and Thomas Burnett and Michael Klug and Paul Debevec},
url = {http://ict.usc.edu/pubs/Geometry-Corrected%20Light%20Field%20Rendering%20for%20Creating%20a%20Holographic%20Stereogram.pdf},
year = {2012},
date = {2012-06-01},
booktitle = {CVPR Workshop for Computational Cameras and Displays},
address = {Providence, RI},
abstract = {We present a technique to record and process a light field of an object in order to produce a printed holographic stereogram. We use a geometry correction process to maximize the depth of field and depth-dependent surface detail even when the array of viewpoints comprising the light field is coarsely sampled with respect to the angular resolution of the printed hologram. We capture the light field data of an object with a digital still camera attached to a 2D translation stage, and generate hogels (holographic elements) for printing by reprojecting the light field onto a photogrammetrically recovered model of the object and querying the relevant rays to be produced by the hologram with respect to this geometry. This results in a significantly clearer image of detail at different depths in the printed holographic stereogram.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Wang, Yi-Hua; Fyffe, Graham; Chen, Bing-Yu; Debevec, Paul
A blendshape model that incorporates physical interaction Journal Article
In: Computer Animation and Virtual Worlds, vol. 23, no. 3-4, pp. 235–243, 2012.
@article{ma_blendshape_2012,
title = {A blendshape model that incorporates physical interaction},
author = {Wan-Chun Ma and Yi-Hua Wang and Graham Fyffe and Bing-Yu Chen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20blendshape%20model%20that%20incorporates%20physical%20interaction-2.pdf},
doi = {10.1002/cav.1441},
year = {2012},
date = {2012-05-01},
journal = {Computer Animation and Virtual Worlds},
volume = {23},
number = {3-4},
pages = {235–243},
abstract = {The linear blendshape technique has been intensively used for computer animation and games because of its simplicity and effectiveness. However, it cannot describe rotational deformations and deformations because of self collision or scene interaction. In this paper, we present a new technique to address these two major limitations by introducing physical-based simulation to blendshapes. The proposed technique begins by constructing a mass–spring system for each blendshape target. Each system is initialized in its steady state by setting the rest length of each spring as the edge length of the corresponding target. To begin shape interpolation, we linearly interpolate the rest lengths of the springs according to a given interpolation factor α ∈ [0,1]. The interpolated shape is then generated by computing the equilibrium of the mass–spring system with the interpolated rest lengths. Results from our technique show physically plausible deformations even in the case of large rotations between blendshape targets. In addition, the new blendshape model is able to interact with other scene elements by introducing collision detection and handling to the mass–spring system.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {article}
}
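The rest-length interpolation described in the abstract above can be illustrated with a minimal Python/NumPy sketch. This is not the authors' implementation: the function names, the unit-mass assumption, and the simple damped explicit relaxation are all illustrative choices, and collision handling is omitted.

import numpy as np

def interpolate_rest_lengths(rest_src, rest_tgt, alpha):
    # Linearly interpolate spring rest lengths for a factor alpha in [0, 1].
    return (1.0 - alpha) * rest_src + alpha * rest_tgt

def relax_to_equilibrium(verts, springs, rest, k=50.0, damping=0.9, dt=0.01, iterations=2000):
    # Damped explicit integration of a mass-spring system until it settles.
    # verts:   (V, 3) vertex positions, initialized from one blendshape target
    # springs: (S, 2) vertex index pairs, one per mesh edge
    # rest:    (S,)   interpolated rest lengths
    pos = verts.copy()
    vel = np.zeros_like(pos)
    i, j = springs[:, 0], springs[:, 1]
    for _ in range(iterations):
        d = pos[j] - pos[i]
        length = np.linalg.norm(d, axis=1, keepdims=True)
        u = d / np.maximum(length, 1e-8)
        f = k * (length - rest[:, None]) * u      # Hooke's law along each spring
        force = np.zeros_like(pos)
        np.add.at(force, i, f)                    # accumulate forces at each endpoint
        np.add.at(force, j, -f)
        vel = damping * (vel + dt * force)        # unit masses assumed
        pos = pos + dt * vel
    return pos

In a production setting the equilibrium would more likely be computed with an implicit or quasi-static solver, with collision detection folded into the force evaluation as the abstract describes.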
Guarnera, Giuseppe Claudio; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Surface Normals from Spherical Stokes Reflectance Fields Proceedings Article
In: ECCV Workshop on Color and Photometry in Computer Vision (CPCV), Firenze, Italy, 2012.
@inproceedings{guarnera_estimating_2012,
title = {Estimating Surface Normals from Spherical Stokes Reflectance Fields},
author = {Giuseppe Claudio Guarnera and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Estimating%20Surface%20Normals%20from%20Spherical%20Stokes%20Reflectance%20Fields.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {ECCV Workshop on Color and Photometry in Computer Vision (CPCV)},
address = {Firenze, Italy},
abstract = {In this paper we introduce a novel technique for estimating surface normals from the four Stokes polarization parameters of specularly reflected light under a single spherical incident lighting condition that is either unpolarized or circularly polarized. We illustrate the practicality of our technique by estimating surface normals under uncontrolled outdoor illumination from just four observations from a fixed viewpoint.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
2011
Ma, Wan-Chun; Wang, Yi-Hua; Fyffe, Graham; Barbic, Jernej; Chen, Bing-Yu; Debevec, Paul
A blendshape model that incorporates physical interaction Proceedings Article
In: SIGGRAPH Asia, Hong Kong, 2011, ISBN: 978-1-4503-1137-3.
@inproceedings{ma_blendshape_2011,
title = {A blendshape model that incorporates physical interaction},
author = {Wan-Chun Ma and Yi-Hua Wang and Graham Fyffe and Jernej Barbic and Bing-Yu Chen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20blendshape%20model%20that%20incorporates%20physical%20interaction.pdf},
doi = {10.1145/2073304.2073343},
isbn = {978-1-4503-1137-3},
year = {2011},
date = {2011-12-01},
booktitle = {SIGGRAPH Asia},
address = {Hong Kong},
abstract = {We present a new technique for physically plausible shape blending by interpolating the spring rest-length parameters of a mass-spring system. This blendshape method begins by constructing two consistent mass-spring systems (i.e., with vertex-wise correspondence and the same topology) for the source and target shapes, respectively, and setting the two systems in their static states; in other words, their edge lengths are equal to the rest lengths of the springs. To create an intermediate pose, we generate a new mass-spring system consistent with the source and target ones and set its rest lengths as linearly interpolated between source and target based on an interpolation factor α ∈ [0, 1]. The new pose is then synthesized by computing the equilibrium given the interpolated rest lengths. In addition, the mass-spring system may interact with other objects in the environment by incorporating collision detection.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Fyffe, Graham; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Debevec, Paul
Multiview Face Capture using Polarized Spherical Gradient Illumination Proceedings Article
In: Proceedings of SIGGRAPH Asia 2011/ACM Trans. on Graphics, 2011.
@inproceedings{ghosh_multiview_2011,
title = {Multiview Face Capture using Polarized Spherical Gradient Illumination},
author = {Abhijeet Ghosh and Graham Fyffe and Borom Tunwattanapong and Jay Busch and Xueming Yu and Paul Debevec},
url = {http://ict.usc.edu/pubs/Multiview%20Face%20Capture%20using%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2011},
date = {2011-12-01},
booktitle = {Proceedings of SIGGRAPH Asia 2011/ACM Trans. on Graphics},
volume = {30(6)},
abstract = {We present a novel process for acquiring detailed facial geometry with high resolution diffuse and specular photometric information from multiple viewpoints using polarized spherical gradient illumination. Key to our method is a new pair of linearly polarized lighting patterns which enables multiview diffuse-specular separation under a given spherical illumination condition from just two photographs. The patterns – one following lines of latitude and one following lines of longitude – allow the use of fixed linear polarizers in front of the cameras, enabling more efficient acquisition of diffuse and specular albedo and normal maps from multiple viewpoints. In a second step, we employ these albedo and normal maps as input to a novel multi-resolution adaptive domain message passing stereo reconstruction algorithm to create high resolution facial geometry. To do this, we formulate the stereo reconstruction from multiple cameras in a commonly parameterized domain for multiview reconstruction. We show competitive results consisting of high-resolution facial geometry with relightable reflectance maps using five DSLR cameras. Our technique scales well for multiview acquisition without requiring specialized camera systems for sensing multiple polarization states.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
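The two-photograph diffuse-specular separation mentioned in the abstract builds on polarization-difference imaging. The sketch below shows only that generic separation step, assuming a parallel-polarized and a cross-polarized image are already available; the latitude/longitude polarization pattern construction that makes this work for multiple views is not reproduced here, and the names are hypothetical.

import numpy as np

def polarization_separation(parallel_img, cross_img):
    # parallel_img: camera polarizer aligned with the illumination (diffuse + specular)
    # cross_img:    cross-polarized image (specular reflection largely blocked)
    specular = np.clip(parallel_img - cross_img, 0.0, None)
    diffuse = 2.0 * cross_img    # both polarizer states pass roughly half the diffuse light
    return diffuse, specular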
Jones, Andrew; Fyffe, Graham; Yu, Xueming; Ma, Wan-Chun; Busch, Jay; Ichikari, Ryosuke; Bolas, Mark; Debevec, Paul
Head-mounted Photometric Stereo for Performance Capture Proceedings Article
In: 8th European Conference on Visual Media Production (CVMP 2011), London, UK, 2011.
@inproceedings{jones_head-mounted_2011,
title = {Head-mounted Photometric Stereo for Performance Capture},
author = {Andrew Jones and Graham Fyffe and Xueming Yu and Wan-Chun Ma and Jay Busch and Ryosuke Ichikari and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Head-mounted%20Photometric%20Stereo%20for%20Performance%20Capture.pdf},
year = {2011},
date = {2011-11-01},
booktitle = {8th European Conference on Visual Media Production (CVMP 2011)},
address = {London, UK},
abstract = {Head-mounted cameras are an increasingly important tool for capturing facial performances to drive virtual characters. They provide a fixed, unoccluded view of the face, useful for observing motion capture dots or as input to video analysis. However, the 2D imagery captured with these systems is typically affected by ambient light and generally fails to record subtle 3D shape changes as the face performs. We have developed a system that augments a head-mounted camera with LED-based photometric stereo. The system allows observation of the face independent of the ambient light and generates per-pixel surface normals so that the performance is recorded dynamically in 3D. The resulting data can be used for facial relighting or as better input to machine learning algorithms for driving an animated face.},
keywords = {DTIC, Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
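For context, the photometric-stereo step described in the abstract can be sketched in its textbook Lambertian form: with several images under known LED directions, per-pixel normals follow from a linear least-squares solve. The formulation and names below are generic assumptions, not the authors' head-mounted pipeline.

import numpy as np

def photometric_stereo_normals(images, light_dirs):
    # images:     (N, H, W) grayscale intensities, one image per LED
    # light_dirs: (N, 3) unit lighting directions in camera space
    # Assumes Lambertian shading: I = albedo * max(dot(n, l), 0).
    n, h, w = images.shape
    I = images.reshape(n, -1)                                   # (N, P) pixels as columns
    G, _, _, _ = np.linalg.lstsq(light_dirs, I, rcond=None)     # (3, P) = albedo * normal
    albedo = np.linalg.norm(G, axis=0)
    normals = G / np.maximum(albedo, 1e-8)
    return normals.T.reshape(h, w, 3), albedo.reshape(h, w)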
Stratou, Giota; Ghosh, Abhijeet; Debevec, Paul; Morency, Louis-Philippe
Exploring the effect of illumination on automatic expression recognition using the ICT-3DRFE database Journal Article
In: Image and Vision Computing, 2011, ISSN: 0262-8856.
@article{stratou_exploring_2011,
title = {Exploring the effect of illumination on automatic expression recognition using the ICT-3DRFE database},
author = {Giota Stratou and Abhijeet Ghosh and Paul Debevec and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Exploring%20the%20effect%20of%20illumination%20on%20automatic%20expression%20recognition%20using%20the%20ICT-3DRFE%20database.pdf},
issn = {0262-8856},
year = {2011},
date = {2011-11-01},
journal = {Image and Vision Computing},
abstract = {One of the main challenges in facial expression recognition is illumination invariance. Our long-term goal is to develop a system for automatic facial expression recognition that is robust to light variations. In this paper, we introduce a novel 3D Relightable Facial Expression (ICT-3DRFE) database that enables experimentation in the fields of both computer graphics and computer vision. The database contains 3D models for 23 subjects and 15 expressions, as well as photometric information that allows for photorealistic rendering. It is also annotated with facial action units, following FACS standards. Using the ICT-3DRFE database, we create an image set of different expressions/illuminations to study the effect of illumination on automatic expression recognition. We compared the output scores from automatic recognition with expert FACS annotations and found that they agree when the illumination is uniform. Our results show that the output distribution of the automatic recognition can change significantly with light variations, sometimes diminishing the discrimination between two different expressions. We propose a ratio-based light transfer method to factor out unwanted illumination from given images and show that it reduces the effect of illumination on expression recognition.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
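The ratio-based light transfer mentioned at the end of the abstract follows the general ratio-image idea: scale a photograph by the per-pixel ratio of the scene rendered (or captured) under the target lighting to the same scene under the source lighting. The sketch below illustrates only that generic idea, under the assumption that both renderings are available; it is not the paper's exact formulation, and the names are hypothetical.

import numpy as np

def ratio_light_transfer(image_src, render_src, render_tgt, eps=1e-4):
    # image_src:  photo (H, W, 3) captured under the source lighting
    # render_src: rendering of the subject under the same source lighting
    # render_tgt: rendering under the desired target lighting
    # All inputs are linear-radiance float arrays.
    ratio = render_tgt / np.maximum(render_src, eps)   # per-pixel lighting ratio
    return np.clip(image_src * ratio, 0.0, None)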
Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Practical Image-Based Relighting and Editing with Spherical-Harmonics and Local Lights Proceedings Article
In: European Conference on Visual Media Production (CVMP), 2011.
@inproceedings{tunwattanapong_practical_2011,
title = {Practical Image-Based Relighting and Editing with Spherical-Harmonics and Local Lights},
author = {Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Image%20Based%20Relighting%20and%20Editing%20with%20Spherical%20Harmonics%20and%20Local%20Lights.pdf},
year = {2011},
date = {2011-11-01},
booktitle = {European Conference on Visual Media Production (CVMP)},
abstract = {We present a practical technique for image-based relighting under environmental illumination which greatly reduces the number of required photographs compared to traditional techniques, while still achieving high quality editable relighting results. The proposed method employs an optimization procedure to combine spherical harmonics, a global lighting basis, with a set of local lights. Our choice of lighting basis captures both low and high frequency components of typical surface reflectance functions while generating close approximations to the ground truth with an order of magnitude less data. This technique benefits the acquisition process by reducing the number of required photographs, while simplifying the modification of reflectance data and enabling artistic lighting edits for post-production effects. Here, we demonstrate two desirable lighting edits, modifying light intensity and angular width, employing the proposed lighting basis.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
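At render time, relighting with a lighting basis such as the one above reduces to a weighted sum of basis images, with the weights given by projecting the target environment onto the same basis. The minimal sketch below assumes the basis responses (spherical-harmonic plus local-light photographs) have already been acquired; shapes and names are illustrative, not the paper's code.

import numpy as np

def relight(basis_images, lighting_coeffs):
    # basis_images:    (K, H, W, 3) captured responses to each lighting basis function
    # lighting_coeffs: (K,) projection of the target environment onto the same basis
    return np.tensordot(lighting_coeffs, basis_images, axes=1)   # -> (H, W, 3)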
Wilson, Cyrus A.; Alexander, Oleg; Tunwattanapong, Borom; Peers, Pieter; Ghosh, Abhijeet; Busch, Jay; Hartholt, Arno; Debevec, Paul
Facial Cartography: Interactive Scan Correspondence Proceedings Article
In: ACM/Eurographics Symposium on Computer Animation, 2011.
@inproceedings{wilson_facial_2011,
title = {Facial Cartography: Interactive Scan Correspondence},
author = {Cyrus A. Wilson and Oleg Alexander and Borom Tunwattanapong and Pieter Peers and Abhijeet Ghosh and Jay Busch and Arno Hartholt and Paul Debevec},
url = {http://ict.usc.edu/pubs/Facial%20Cartography-%20Interactive%20Scan%20Correspondence.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {ACM/Eurographics Symposium on Computer Animation},
abstract = {We present a semi-automatic technique for computing surface correspondences between 3D facial scans in different expressions, such that scan data can be mapped into a common domain for facial animation. The technique can accurately correspond high-resolution scans of widely differing expressions – without requiring intermediate pose sequences – such that they can be used, together with reflectance maps, to create high-quality blendshape-based facial animation. We optimize correspondences through a combination of Image, Shape, and Internal forces, as well as Directable forces to allow a user to interactively guide and refine the solution. Key to our method is a novel representation, called an Active Visage, that balances the advantages of both deformable templates and correspondence computation in a 2D canonical domain. We show that our semi-automatic technique achieves more robust results than automated correspondence alone, and is more precise than is practical with unaided manual input.},
keywords = {DTIC, Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Fyffe, Graham; Debevec, Paul
Optimized Local Blendshape Mapping for Facial Motion Retargeting Proceedings Article
In: SIGGRAPH 2011, Vancouver, Canada, 2011.
@inproceedings{ma_optimized_2011,
title = {Optimized Local Blendshape Mapping for Facial Motion Retargeting},
author = {Wan-Chun Ma and Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Optimized%20Local%20Blendshape%20Mapping%20for%20Facial%20Motion%20Retargeting.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {SIGGRAPH 2011},
address = {Vancouver, Canada},
abstract = {One of the popular methods for facial motion retargeting is local blendshape mapping [Pighin and Lewis 2006], where each local facial region is controlled by a tracked feature (for example, a vertex in motion capture data). To map a target motion input onto blendshapes, a pose set is chosen for each facial region with minimal retargeting error. However, since the best pose set for each region is chosen independently, the solution is likely to have unorganized pose sets across the face regions, as shown in Figure 1(b). Therefore, even though every pose set matches the local features, the retargeting result is not guaranteed to be spatially smooth. In addition, previous methods ignored temporal coherence, which is key for jitter-free results.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Hawkins, Tim; Watts, Chris; Ma, Wan-Chun; Debevec, Paul
Comprehensive Facial Performance Capture Proceedings Article
In: Eurographics 2011, 2011.
@inproceedings{fyffe_comprehensive_2011,
title = {Comprehensive Facial Performance Capture},
author = {Graham Fyffe and Tim Hawkins and Chris Watts and Wan-Chun Ma and Paul Debevec},
url = {http://ict.usc.edu/pubs/Comprehensive%20Facial%20Performance%20Capture.pdf},
year = {2011},
date = {2011-04-01},
booktitle = {Eurographics 2011},
abstract = {We present a system for recording a live dynamic facial performance, capturing highly detailed geometry and spatially varying diffuse and specular reflectance information for each frame of the performance. The result is a reproduction of the performance that can be rendered from novel viewpoints and novel lighting conditions, achieving photorealistic integration into any virtual environment. Dynamic performances are captured directly, without the need for any template geometry or static geometry scans, and processing is completely automatic, requiring no human input or guidance. Our key contributions are a heuristic for estimating facial reflectance information from gradient illumination photographs, and a geometry optimization framework that maximizes a principled likelihood function combining multi-view stereo correspondence and photometric stereo, using multi-resolution belief propagation. The output of our system is a sequence of geometries and reflectance maps, suitable for rendering in off-the-shelf software. We show results from our system rendered under novel viewpoints and lighting conditions, and validate our results by demonstrating a close match to ground truth photographs.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ko-Yun; Ma, Wan-Chun; Chang, Chun-Fa; Wang, Chuan-Chang; Debevec, Paul
A framework for locally retargeting and rendering facial performance Proceedings Article
In: Computer Animation and Virtual Worlds, pp. 159–167, 2011.
@inproceedings{liu_framework_2011,
title = {A framework for locally retargeting and rendering facial performance},
author = {Ko-Yun Liu and Wan-Chun Ma and Chun-Fa Chang and Chuan-Chang Wang and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Framework%20for%20Locally%20Retargeting%20and%20Rendering%20Facial%20Performance.pdf},
year = {2011},
date = {2011-04-01},
booktitle = {Computer Animation and Virtual Worlds},
volume = {22},
pages = {159–167},
abstract = {We present a facial motion retargeting method that enables the control of a blendshape rig according to marker-based motion capture data. The main purpose of the proposed technique is to allow a blendshape rig to create facial expressions that conform best to the current motion capture input, regardless of the underlying blendshape poses. In other words, even if all of the blendshape poses comprise only symmetrical facial expressions, our method is still able to create asymmetrical expressions without physically splitting any of them into more local blendshape poses. An automatic segmentation technique based on the analysis of facial motion is introduced to create facial regions for local retargeting. We also show that it is possible to blend normal maps for rendering in the same framework. Rendering with the blended normal map significantly improves surface appearance and details.},
keywords = {DTIC, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
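The normal-map blending mentioned at the end of the abstract can be illustrated by weighting per-pose normal maps and renormalizing the result to unit length. The weighting scheme and names below are assumptions for illustration, not the paper's exact formulation.

import numpy as np

def blend_normal_maps(normal_maps, weights, eps=1e-8):
    # normal_maps: (K, H, W, 3) decoded unit normals in [-1, 1]
    # weights:     (K,) or (K, H, W) blend weights, e.g., per-region retargeting weights
    w = np.asarray(weights, dtype=float)
    if w.ndim == 1:
        w = w[:, None, None]
    blended = np.sum(w[..., None] * normal_maps, axis=0)
    norm = np.linalg.norm(blended, axis=-1, keepdims=True)
    return blended / np.maximum(norm, eps)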