Publications
Saito, Shunsuke; Hu, Liwen; Ma, Chongyang; Ibayashi, Hikaru; Luo, Linjie; Li, Hao
3D Hair Synthesis Using Volumetric Variational Autoencoders Proceedings Article
In: SIGGRAPH Asia 2018 Technical Papers on - SIGGRAPH Asia '18, pp. 1–12, ACM Press, Tokyo, Japan, 2018, ISBN: 978-1-4503-6008-1.
@inproceedings{saito_3d_2018,
title = {3D Hair Synthesis Using Volumetric Variational Autoencoders},
author = {Shunsuke Saito and Liwen Hu and Chongyang Ma and Hikaru Ibayashi and Linjie Luo and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=3272127.3275019},
doi = {10.1145/3272127.3275019},
isbn = {978-1-4503-6008-1},
year = {2018},
date = {2018-12-01},
booktitle = {SIGGRAPH Asia 2018 Technical Papers on - SIGGRAPH Asia '18},
pages = {1–12},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Recent advances in single-view 3D hair digitization have made the creation of high-quality CG characters scalable and accessible to end-users, enabling new forms of personalized VR and gaming experiences. To handle the complexity and variety of hair structures, most cutting-edge techniques rely on the successful retrieval of a particular hair model from a comprehensive hair database. Not only are the aforementioned data-driven methods storage intensive, but they are also prone to failure for highly unconstrained input images, complicated hairstyles, and failed face detection. Instead of using a large collection of 3D hair models directly, we propose to represent the manifold of 3D hairstyles implicitly through a compact latent space of a volumetric variational autoencoder (VAE). This deep neural network is trained with volumetric orientation field representations of 3D hair models and can synthesize new hairstyles from a compressed code. To enable end-to-end 3D hair inference, we train an additional embedding network to predict the code in the VAE latent space from any input image. Strand-level hairstyles can then be generated from the predicted volumetric representation. Our fully automatic framework does not require any ad-hoc face fitting, intermediate classification and segmentation, or hairstyle database retrieval. Our hair synthesis approach is significantly more robust and can handle a much wider variation of hairstyles than state-of-the-art data-driven hair modeling techniques with challenging inputs, including photos that are low-resolution, overexposed, or contain extreme head poses. The storage requirements are minimal and a 3D hair model can be produced from an image in a second. Our evaluations also show that successful reconstructions are possible from highly stylized cartoon images, non-human subjects, and pictures taken from behind a person. Our approach is particularly well suited for continuous and plausible hair interpolation between very different hairstyles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wei, Lingyu; Hu, Liwen; Kim, Vladimir; Yumer, Ersin; Li, Hao
Real-Time Hair Rendering using Sequential Adversarial Networks Proceedings Article
In: Proceedings of the 15th European Conference on Computer Vision, Computer Vision Foundation, Munich, Germany, 2018.
@inproceedings{wei_real-time_2018,
title = {Real-Time Hair Rendering using Sequential Adversarial Networks},
author = {Lingyu Wei and Liwen Hu and Vladimir Kim and Ersin Yumer and Hao Li},
url = {http://openaccess.thecvf.com/content_ECCV_2018/papers/Lingyu_Wei_Real-Time_Hair_Rendering_ECCV_2018_paper.pdf},
year = {2018},
date = {2018-09-01},
booktitle = {Proceedings of the 15th European Conference on Computer Vision},
publisher = {Computer Vision Foundation},
address = {Munich, Germany},
abstract = {We present an adversarial network for rendering photorealistic hair as an alternative to conventional computer graphics pipelines. Our deep learning approach does not require low-level parameter tuning or ad-hoc asset design. Our method simply takes a strand-based 3D hair model as input and provides intuitive user-control for color and lighting through reference images. To handle the diversity of hairstyles and their appearance complexity, we disentangle hair structure, color, and illumination properties using a sequential GAN architecture and a semi-supervised training approach. We also introduce an intermediate edge activation map to orientation field conversion step to ensure a successful CG-to-photoreal transition, while preserving the hair structures of the original input data. As we only require a feed-forward pass through the network, our rendering performs in real-time. We demonstrate the synthesis of photorealistic hair images on a wide range of intricate hairstyles and compare our technique with state-of-the-art hair rendering methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Song, Yuhang; Yang, Chao; Lin, Zhe; Liu, Xiaofeng; Li, Hao; Huang, Qin
Contextual Based Image Inpainting: Infer, Match and Translate Proceedings Article
In: Proceedings of the 15th European Conference on Computer Vision, Computer Vision Foundation, Munich, Germany, 2018.
@inproceedings{song_contextual_2018,
title = {Contextual Based Image Inpainting: Infer, Match and Translate},
author = {Yuhang Song and Chao Yang and Zhe Lin and Xiaofeng Liu and Hao Li and Qin Huang},
url = {http://openaccess.thecvf.com/content_ECCV_2018/papers/Yuhang_Song_Contextual_Based_Image_ECCV_2018_paper.pdf},
year = {2018},
date = {2018-09-01},
booktitle = {Proceedings of the 15th European Conference on Computer Vision},
publisher = {Computer Vision Foundation},
address = {Munich, Germany},
abstract = {We study the task of image inpainting, which is to fill in the missing region of an incomplete image with plausible contents. To this end, we propose a learning-based approach to generate visually coherent completion given a high-resolution image with missing components. In order to overcome the difficulty of directly learning the distribution of high-dimensional image data, we divide the task into inference and translation as two separate steps and model each step with a deep neural network. We also use simple heuristics to guide the propagation of local textures from the boundary to the hole. We show that, by using such techniques, inpainting reduces to the problem of learning two image-feature translation functions in a much smaller space, which is hence easier to train. We evaluate our method on several public datasets and show that we generate results of better visual quality than previous state-of-the-art methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zheng, Zerong; Yu, Tao; Li, Hao; Guo, Kaiwen; Dai, Qionghai; Fang, Lu; Liu, Yebin
HybridFusion: Real-Time Performance Capture Using a Single Depth Sensor and Sparse IMUs Proceedings Article
In: Proceedings of the 15th European Conference on Computer Vision, Computer Vision Foundation, Munich, Germany, 2018.
@inproceedings{zheng_hybridfusion_2018,
title = {HybridFusion: Real-Time Performance Capture Using a Single Depth Sensor and Sparse IMUs},
author = {Zerong Zheng and Tao Yu and Hao Li and Kaiwen Guo and Qionghai Dai and Lu Fang and Yebin Liu},
url = {http://openaccess.thecvf.com/content_ECCV_2018/papers/Zerong_Zheng_HybridFusion_Real-Time_Performance_ECCV_2018_paper.pdf},
year = {2018},
date = {2018-09-01},
booktitle = {Proceedings of the 15th European Conference on Computer Vision},
publisher = {Computer Vision Foundation},
address = {Munich, Germany},
abstract = {We propose a light-weight yet highly robust method for real-time human performance capture based on a single depth camera and sparse inertial measurement units (IMUs). Our method combines nonrigid surface tracking and volumetric fusion to simultaneously reconstruct challenging motions, detailed geometries and the inner human body of a clothed subject. The proposed hybrid motion tracking algorithm and efficient per-frame sensor calibration technique enable nonrigid surface reconstruction for fast motions and challenging poses with severe occlusions. Significant fusion artifacts are reduced using a new confidence measurement for our adaptive TSDF-based fusion. The above contributions are mutually beneficial in our reconstruction system, which enables practical human performance capture that is real-time, robust, low-cost and easy to deploy. Experiments show that extremely challenging performances and loop closure problems can be handled successfully.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhou, Yi; Hu, Liwen; Xing, Jun; Chen, Weikai; Kung, Han-Wei; Tong, Xin; Li, Hao
HairNet: Single-View Hair Reconstruction using Convolutional Neural Networks Proceedings Article
In: Proceedings of the 15th European Conference on Computer Vision, Computer Vision Foundation, Munich, Germany, 2018.
@inproceedings{zhou_hairnet_2018,
title = {HairNet: Single-View Hair Reconstruction using Convolutional Neural Networks},
author = {Yi Zhou and Liwen Hu and Jun Xing and Weikai Chen and Han-Wei Kung and Xin Tong and Hao Li},
url = {http://openaccess.thecvf.com/content_ECCV_2018/papers/Yi_Zhou_Single-view_Hair_Reconstruction_ECCV_2018_paper.pdf},
year = {2018},
date = {2018-09-01},
booktitle = {Proceedings of the 15th European Conference on Computer Vision},
publisher = {Computer Vision Foundation},
address = {Munich, Germany},
abstract = {We introduce a deep learning-based method to generate full 3D hair geometry from an unconstrained image. Our method can recover local strand details and has real-time performance. State-of-the-art hair modeling techniques rely on large hairstyle collections for nearest neighbor retrieval and then perform ad-hoc refinement. Our deep learning approach, in contrast, is highly efficient in storage and can run 1000 times faster while generating hair with 30K strands. The convolutional neural network takes the 2D orientation field of a hair image as input and generates strand features that are evenly distributed on the parameterized 2D scalp. We introduce a collision loss to synthesize more plausible hairstyles, and the visibility of each strand is also used as a weight term to improve the reconstruction accuracy. The encoder-decoder architecture of our network naturally provides a compact and continuous representation for hairstyles, which allows us to interpolate naturally between hairstyles. We use a large set of rendered synthetic hair models to train our network. Our method scales to real images because an intermediate 2D orientation field, automatically calculated from the real image, factors out the difference between synthetic and real hairs. We demonstrate the effectiveness and robustness of our method on a wide range of challenging real Internet pictures, and show reconstructed hair sequences from videos.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Zeng; Li, Tianye; Chen, Weikai; Zhao, Yajie; Xing, Jun; LeGendre, Chloe; Luo, Linjie; Ma, Chongyang; Li, Hao
Deep Volumetric Video From Very Sparse Multi-View Performance Capture Proceedings Article
In: Proceedings of the 15th European Conference on Computer Vision, Computer Vision Foundation, Munich, Germany, 2018.
@inproceedings{huang_deep_2018,
title = {Deep Volumetric Video From Very Sparse Multi-View Performance Capture},
author = {Zeng Huang and Tianye Li and Weikai Chen and Yajie Zhao and Jun Xing and Chloe LeGendre and Linjie Luo and Chongyang Ma and Hao Li},
url = {http://openaccess.thecvf.com/content_ECCV_2018/papers/Zeng_Huang_Deep_Volumetric_Video_ECCV_2018_paper.pdf},
year = {2018},
date = {2018-09-01},
booktitle = {Proceedings of the 15th European Conference on Computer Vision},
publisher = {Computer Vision Foundation},
address = {Munich, Germany},
abstract = {We present a deep learning based volumetric approach for performance capture using a passive and highly sparse multi-view capture system. State-of-the-art performance capture systems require either pre-scanned actors, a large number of cameras, or active sensors. In this work, we focus on the task of template-free, per-frame 3D surface reconstruction from as few as three RGB sensors, for which conventional visual hull or multi-view stereo methods fail to generate plausible results. We introduce a novel multi-view Convolutional Neural Network (CNN) that maps 2D images to a 3D volumetric field and we use this field to encode the probabilistic distribution of surface points of the captured subject. By querying the resulting field, we can instantiate the clothed human body at arbitrary resolutions. Our approach scales to different numbers of input images, which yield increased reconstruction quality when more views are used. Although only trained on synthetic data, our network can generalize to handle real footage from body performance capture. Our method is suitable for high-quality low-cost full body volumetric capture solutions, which are gaining popularity for VR and AR content creation. Experimental results demonstrate that our method is significantly more robust and accurate than existing techniques when only very sparse views are available.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yamaguchi, Shuco; Saito, Shunsuke; Nagano, Koki; Zhao, Yajie; Chen, Weikai; Olszewski, Kyle; Morishima, Shigeo; Li, Hao
High-fidelity facial reflectance and geometry inference from an unconstrained image Journal Article
In: ACM Transactions on Graphics, vol. 37, no. 4, pp. 1–14, 2018, ISSN: 07300301.
@article{yamaguchi_high-fidelity_2018,
title = {High-fidelity facial reflectance and geometry inference from an unconstrained image},
author = {Shuco Yamaguchi and Shunsuke Saito and Koki Nagano and Yajie Zhao and Weikai Chen and Kyle Olszewski and Shigeo Morishima and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=3197517.3201364},
doi = {10.1145/3197517.3201364},
issn = {07300301},
year = {2018},
date = {2018-08-01},
journal = {ACM Transactions on Graphics},
volume = {37},
number = {4},
pages = {1–14},
abstract = {We present a deep learning-based technique to infer high-quality facial reflectance and geometry given a single unconstrained image of the subject, which may contain partial occlusions and arbitrary illumination conditions. The reconstructed high-resolution textures, which are generated in only a few seconds, include high-resolution skin surface reflectance maps, representing both the diffuse and specular albedo, and medium- and high-frequency displacement maps, thereby allowing us to render compelling digital avatars under novel lighting conditions. To extract this data, we train our deep neural networks with a high-quality skin reflectance and geometry database created with a state-of-the-art multi-view photometric stereo system using polarized gradient illumination. Given the raw facial texture map extracted from the input image, our neural networks synthesize complete reflectance and displacement maps, as well as complete missing regions caused by occlusions. The completed textures exhibit consistent quality throughout the face due to our network architecture, which propagates texture features from the visible region, resulting in high-fidelity details that are consistent with those seen in visible regions. We describe how this highly underconstrained problem is made tractable by dividing the full inference into smaller tasks, which are addressed by dedicated neural networks. We demonstrate the effectiveness of our network design with robust texture completion from images of faces that are largely occluded. With the inferred reflectance and geometry data, we demonstrate the rendering of high-fidelity 3D avatars from a variety of subjects captured under different lighting conditions. In addition, we perform evaluations demonstrating that our method can infer plausible facial reflectance and geometric details comparable to those obtained from high-end capture devices, and outperform alternative approaches that require only a single unconstrained input image.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
LeGendre, Chloe; Bladin, Kalle; Kishore, Bipin; Ren, Xinglei; Yu, Xueming; Debevec, Paul
Efficient Multispectral Facial Capture with Monochrome Cameras Proceedings Article
In: ACM SIGGRAPH 2018 Posters on - SIGGRAPH '18, ACM Press, Vancouver, British Columbia, Canada, 2018, ISBN: 978-1-4503-5817-0.
@inproceedings{legendre_efficient_2018,
title = {Efficient Multispectral Facial Capture with Monochrome Cameras},
author = {Chloe LeGendre and Kalle Bladin and Bipin Kishore and Xinglei Ren and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3230744.3230778},
doi = {10.1145/3230744.3230778},
isbn = {978-1-4503-5817-0},
year = {2018},
date = {2018-08-01},
booktitle = {ACM SIGGRAPH 2018 Posters on - SIGGRAPH '18},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {We propose a variant to polarized gradient illumination facial scanning which uses monochrome instead of color cameras to achieve more efficient and higher-resolution results. In typical polarized gradient facial scanning, sub-millimeter geometric detail is acquired by photographing the subject in eight or more polarized spherical gradient lighting conditions made with white LEDs, and RGB cameras are used to acquire color texture maps of the subject's appearance. In our approach, we replace the color cameras and white LEDs with monochrome cameras and multispectral, colored LEDs, leveraging that color images can be formed from successive monochrome images recorded under different illumination colors. While a naive extension of the scanning process to this setup would require multiplying the number of images by the number of color channels, we show that the surface detail maps can be estimated directly from monochrome imagery, so that only an additional n photographs are required, where n is the number of added spectral channels. We also introduce a new multispectral optical flow approach to align images across spectral channels in the presence of slight subject motion. Lastly, for the case where a capture system's white light sources are polarized and its multispectral colored LEDs are not, we introduce the technique of multispectral polarization promotion, where we estimate the cross- and parallel-polarized monochrome images for each spectral channel from their corresponding images under a full sphere of even, unpolarized illumination. We demonstrate that this technique allows us to efficiently acquire a full color (or even multispectral) facial scan using monochrome cameras, unpolarized multispectral colored LEDs, and polarized white LEDs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Bo; He, Mingming; Liao, Jing; Sander, Pedro V; Yuan, Lu; Bermak, Amine; Chen, Dong
Deep Exemplar-Based Video Colorization Journal Article
In: ACM Transactions on Graphics, vol. 37, no. 4, pp. 10, 2018.
@article{zhang_deep_2018,
title = {Deep Exemplar-Based Video Colorization},
author = {Bo Zhang and Mingming He and Jing Liao and Pedro V Sander and Lu Yuan and Amine Bermak and Dong Chen},
url = {https://dl.acm.org/citation.cfm?id=3201365},
doi = {10.1145/3197517.3201365},
year = {2018},
date = {2018-08-01},
journal = {ACM Transactions on Graphics},
volume = {37},
number = {4},
pages = {10},
abstract = {This paper presents the first end-to-end network for exemplar-based video colorization. The main challenge is to achieve temporal consistency while remaining faithful to the reference style. To address this issue, we introduce a recurrent framework that unifies the semantic correspondence and color propagation steps. Both steps allow a provided reference image to guide the colorization of every frame, thus reducing accumulated propagation errors. Video frames are colorized in sequence based on the colorization history, and its coherency is further enforced by the temporal consistency loss. All of these components, learnt end-to-end, help produce realistic videos with good temporal stability. Experiments show our result is superior to the state-of-the-art methods both quantitatively and qualitatively.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Huynh, Loc; Chen, Weikai; Saito, Shunsuke; Xing, Jun; Nagano, Koki; Jones, Andrew; Debevec, Paul; Li, Hao
Mesoscopic Facial Geometry Inference Using Deep Neural Networks Proceedings Article
In: Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition, IEEE, Salt Lake City, UT, 2018.
@inproceedings{huynh_mesoscopic_2018,
title = {Mesoscopic Facial Geometry Inference Using Deep Neural Networks},
author = {Loc Huynh and Weikai Chen and Shunsuke Saito and Jun Xing and Koki Nagano and Andrew Jones and Paul Debevec and Hao Li},
url = {http://openaccess.thecvf.com/content_cvpr_2018/papers/Huynh_Mesoscopic_Facial_Geometry_CVPR_2018_paper.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition},
publisher = {IEEE},
address = {Salt Lake City, UT},
abstract = {We present a learning-based approach for synthesizing facial geometry at medium and fine scales from diffusely-lit facial texture maps. When applied to an image sequence, the synthesized detail is temporally coherent. Unlike current state-of-the-art methods [17, 5], which assume "dark is deep", our model is trained with measured facial detail collected using polarized gradient illumination in a Light Stage [20]. This enables us to produce plausible facial detail across the entire face, including where previous approaches may incorrectly interpret dark features as concavities such as at moles, hair stubble, and occluded pores. Instead of directly inferring 3D geometry, we propose to encode fine details in high-resolution displacement maps which are learned through a hybrid network adopting the state-of-the-art image-to-image translation network [29] and super resolution network [43]. To effectively capture geometric detail at both mid- and high frequencies, we factorize the learning into two separate sub-networks, enabling the full range of facial detail to be modeled. Results from our learning-based approach compare favorably with a high-quality active facial scanning technique, and require only a single passive lighting condition without a complex scanning setup.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Tao; Zheng, Zerong; Guo, Kaiwen; Zhao, Jianhui; Dai, Qionghai; Li, Hao; Pons-Moll, Gerard; Liu, Yebin
DoubleFusion: Real-time Capture of Human Performances with Inner Body Shapes from a Single Depth Sensor Proceedings Article
In: Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition, IEEE, Salt Lake City, UT, 2018.
@inproceedings{yu_doublefusion_2018,
title = {DoubleFusion: Real-time Capture of Human Performances with Inner Body Shapes from a Single Depth Sensor},
author = {Tao Yu and Zerong Zheng and Kaiwen Guo and Jianhui Zhao and Qionghai Dai and Hao Li and Gerard Pons-Moll and Yebin Liu},
url = {http://openaccess.thecvf.com/content_cvpr_2018/CameraReady/1321.pdf},
year = {2018},
date = {2018-06-01},
booktitle = {Proceedings of the 31st IEEE International Conference on Computer Vision and Pattern Recognition},
publisher = {IEEE},
address = {Salt Lake City, UT},
abstract = {We propose DoubleFusion, a new real-time system that combines volumetric dynamic reconstruction with data-driven template fitting to simultaneously reconstruct detailed geometry, non-rigid motion and the inner human body shape from a single depth camera. One of the key contributions of this method is a double layer representation consisting of a complete parametric body shape inside, and a gradually fused outer surface layer. A pre-defined node graph on the body surface parameterizes the nonrigid deformations near the body, and a free-form dynamically changing graph parameterizes the outer surface layer far from the body, which allows more general reconstruction. We further propose a joint motion tracking method based on the double layer representation to enable robust and fast motion tracking performance. Moreover, the inner body shape is optimized online and forced to fit inside the outer surface layer. Overall, our method enables increasingly denoised, detailed and complete surface reconstructions, fast motion tracking performance and plausible inner body shape reconstruction in real-time. In particular, experiments show improved fast motion tracking and loop closure performance on more challenging scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhou, Yi; He, Chong; Li, Zimo; Xiao, Shuangjiu; Huang, Zeng; Li, Hao
Auto-Conditioned Recurrent Networks for Extended Complex Human Motion Synthesis Proceedings Article
In: Proceedings of the 6th International Conference on Learning Representations, ICLR, Vancouver, British Columbia, Canada, 2018.
@inproceedings{zhou_auto-conditioned_2018,
title = {Auto-Conditioned Recurrent Networks for Extended Complex Human Motion Synthesis},
author = {Yi Zhou and Chong He and Zimo Li and Shuangjiu Xiao and Zeng Huang and Hao Li},
url = {https://openreview.net/forum?id=r11Q2SlRW},
year = {2018},
date = {2018-04-01},
booktitle = {Proceedings of the 6th International Conference on Learning Representations},
publisher = {ICLR},
address = {Vancouver, British Columbia, Canada},
abstract = {We present a real-time method for synthesizing highly complex human motions using a novel training regime we call the auto-conditioned Recurrent Neural Network (acRNN). Recently, researchers have attempted to synthesize new motion by using autoregressive techniques, but existing methods tend to freeze or diverge after a couple of seconds due to an accumulation of errors that are fed back into the network. Furthermore, such methods have only been shown to be reliable for relatively simple human motions, such as walking or running. In contrast, our approach can synthesize arbitrary motions with highly complex styles, including dances or martial arts in addition to locomotion. The acRNN is able to accomplish this by explicitly accommodating for autoregressive noise accumulation during training. Our work is the first to our knowledge that demonstrates the ability to generate over 18,000 continuous frames (300 seconds) of new complex human motion w.r.t. different styles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Chen, Samantha; Rosenberg, Evan Suma
Redirected Walking Strategies in Irregularly Shaped and Dynamic Physical Environments Proceedings Article
In: Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE, Reutlingen, Germany, 2018.
@inproceedings{chen_redirected_2018,
title = {Redirected Walking Strategies in Irregularly Shaped and Dynamic Physical Environments},
author = {Haiwei Chen and Samantha Chen and Evan Suma Rosenberg},
url = {http://wevr.adalsimeone.me/2018/WEVR2018_Chen.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces},
publisher = {IEEE},
address = {Reutlingen, Germany},
abstract = {Redirected walking (RDW) is a Virtual Reality (VR) locomotion technique that enables the exploration of a large virtual environment (VE) within a small physical space via real walking. Thus far, the physical environment has generally been assumed to be rectangular, static, and free of obstacles. However, it is unlikely that real-world locations that may be used for VR fulfill these constraints. In addition, accounting for a dynamically changing physical environment allows RDW algorithms to accommodate gradually mapped physical environments and moving objects. In this work, we introduce novel approaches that adapt RDW algorithms to support irregularly shaped and dynamic physical environments. Our methods are divided into three categories: novel RDW Greedy Algorithms that provide a generalized approach for any VE, adapted RDW Planning Algorithms that provide an optimized solution when virtual path prediction is available, and last but not least, techniques for representing irregularly shaped and dynamic physical environments that can improve performance of RDW algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Batsos, Konstantinos; Mordohai, Philippos
High-Resolution Stereo Matching based on Sampled Photoconsistency Computation Proceedings Article
In: Proceedings of the British Machine Vision Conference 2017, London, UK, 2017.
@inproceedings{legendre_high-resolution_2017,
title = {High-Resolution Stereo Matching based on Sampled Photoconsistency Computation},
author = {Chloe LeGendre and Konstantinos Batsos and Philippos Mordohai},
url = {http://ict.usc.edu/pubs/High-Resolution%20Stereo%20Matching%20based%20on%20Sampled%20Photoconsistency%20Computation.pdf},
year = {2017},
date = {2017-09-01},
booktitle = {Proceedings of the British Machine Vision Conference 2017},
address = {London, UK},
abstract = {We propose an approach to binocular stereo that avoids exhaustive photoconsistency computations at every pixel, since they are redundant and computationally expensive, especially for high resolution images. We argue that developing scalable stereo algorithms is critical as image resolution is expected to continue increasing rapidly. Our approach relies on oversegmentation of the images into superpixels, followed by photoconsistency computation for only a random subset of the pixels of each superpixel. This generates sparse reconstructed points which are used to fit planes. Plane hypotheses are propagated among neighboring superpixels, and they are evaluated at each superpixel by selecting a random subset of pixels on which to aggregate photoconsistency scores for the competing planes. We performed extensive tests to characterize the performance of this algorithm in terms of accuracy and speed on the full-resolution stereo pairs of the 2014 Middlebury benchmark that contains up to 6-megapixel images. Our results show that very large computational savings can be achieved at a small loss of accuracy. A multi-threaded implementation of our method is faster than other methods that achieve similar accuracy and thus it provides a useful accuracy-speed tradeoff.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Huynh, Loc; Wang, Shanhe; Debevec, Paul
Modeling vellus facial hair from asperity scattering silhouettes Proceedings Article
In: Proceedings of SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5008-2.
@inproceedings{legendre_modeling_2017,
title = {Modeling vellus facial hair from asperity scattering silhouettes},
author = {Chloe LeGendre and Loc Huynh and Shanhe Wang and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3084363.3085057},
doi = {10.1145/3084363.3085057},
isbn = {978-1-4503-5008-2},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for modeling the vellus hair over the face based on observations of asperity scattering along a subject's silhouette. We photograph the backlit subject in profile and three-quarters views with a high-resolution DSLR camera to observe the vellus hair on the side and front of the face and separately acquire a 3D scan of the face geometry and texture. We render a library of backlit vellus hair patch samples with different geometric parameters such as density, orientation, and curvature, and we compute image statistics for each set of parameters. We trace the silhouette contour in each face image and straighten the backlit hair silhouettes using image resampling. We compute image statistics for each section of the facial silhouette and determine which set of hair modeling parameters best matches the statistics. We then generate a complete set of vellus hairs for the face by interpolating and extrapolating the matched parameters over the skin. We add the modeled vellus hairs to the 3D facial scan and generate renderings under novel lighting conditions, generally matching the appearance of real photographs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Krissman, David; Debevec, Paul
Improved Chromakey of Hair Strands via Orientation Filter Convolution Proceedings Article
In: Proceedings of ACM SIGGRAPH 2017 (SIGGRAPH '17), pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5015-0.
@inproceedings{legendre_improved_2017,
title = {Improved Chromakey of Hair Strands via Orientation Filter Convolution},
author = {Chloe LeGendre and David Krissman and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=3102200},
doi = {10.1145/3102163.3102200},
isbn = {978-1-4503-5015-0},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of ACM SIGGRAPH 2017 (SIGGRAPH '17)},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for improving the alpha matting of challenging green-screen video sequences involving hair strands. As hair strands are thin and can be semi-translucent, they are especially hard to separate from a background. However, they appear as extended lines and thus have a strong response when convolved with oriented filters, even in the presence of noise. We leverage this oriented filter response to robustly locate hair strands within each frame of an actor’s performance filmed in front of a green-screen. We demonstrate using production video footage that individual hair fibers excluded from a coarse artist’s matte can be located and then added to the foreground element, qualitatively improving the composite result without added manual labor.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Laine, Samuli; Karras, Tero; Aila, Timo; Herva, Antti; Saito, Shunsuke; Yu, Ronald; Li, Hao; Lehtinen, Jaakko
Production-level facial performance capture using deep convolutional neural networks Proceedings Article
In: Proceedings of the ACM SIGGRAPH / Eurographics Symposium on Computer Animation, pp. 1–10, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5091-4.
@inproceedings{laine_production-level_2017,
title = {Production-level facial performance capture using deep convolutional neural networks},
author = {Samuli Laine and Tero Karras and Timo Aila and Antti Herva and Shunsuke Saito and Ronald Yu and Hao Li and Jaakko Lehtinen},
url = {http://dl.acm.org/citation.cfm?doid=3099564.3099581},
doi = {10.1145/3099564.3099581},
isbn = {978-1-4503-5091-4},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the ACM SIGGRAPH / Eurographics Symposium on Computer Animation},
pages = {1–10},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a real-time deep learning framework for video-based facial performance capture—the dense 3D tracking of an actor's face given a monocular video. Our pipeline begins with accurately capturing a subject using a high-end production facial capture pipeline based on multi-view stereo tracking and artist-enhanced animations. With 5–10 minutes of captured footage, we train a convolutional neural network to produce high-quality output, including self-occluded regions, from a monocular video sequence of that subject. Since this 3D facial performance capture is fully automated, our system can drastically reduce the amount of labor involved in the development of modern narrative-driven video games or films involving realistic digital doubles of actors and potentially hours of animated dialogue per character. We compare our results with several state-of-the-art monocular real-time facial capture techniques and demonstrate compelling animation inference in challenging areas such as eyes and lips.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Wei, Lingyu; Hu, Liwen; Nagano, Koki; Li, Hao
Photorealistic Facial Texture Inference Using Deep Neural Networks Proceedings Article
In: Proceedings of the 30th IEEE International Conference on Computer Vision and Pattern Recognition 2017 (CVPR 2017), IEEE, Honolulu, HI, 2017.
@inproceedings{saito_photorealistic_2017,
title = {Photorealistic Facial Texture Inference Using Deep Neural Networks},
author = {Shunsuke Saito and Lingyu Wei and Liwen Hu and Koki Nagano and Hao Li},
url = {https://arxiv.org/abs/1612.00523},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the 30th IEEE International Conference on Computer Vision and Pattern Recognition 2017 (CVPR 2017)},
publisher = {IEEE},
address = {Honolulu, HI},
abstract = {We present a data-driven inference method that can synthesize a photorealistic texture map of a complete 3D face model given a partial 2D view of a person in the wild. After an initial estimation of shape and low-frequency albedo, we compute a high-frequency partial texture map, without the shading component, of the visible face area. To extract the fine appearance details from this incomplete input, we introduce a multi-scale detail analysis technique based on midlayer feature correlations extracted from a deep convolutional neural network. We demonstrate that fitting a convex combination of feature correlations from a high-resolution face database can yield a semantically plausible facial detail description of the entire face. A complete and photorealistic texture map can then be synthesized by iteratively optimizing for the reconstructed feature correlations. Using these high-resolution textures and a commercial rendering framework, we can produce high-fidelity 3D renderings that are visually comparable to those obtained with state-of-the-art multi-view face capture systems. We demonstrate successful face reconstructions from a wide range of low resolution input images, including those of historical figures. In addition to extensive evaluations, we validate the realism of our results using a crowdsourced user study.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, G.; Nagano, K.; Huynh, L.; Saito, S.; Busch, J.; Jones, A.; Li, H.; Debevec, P.
Multi-View Stereo on Consistent Face Topology Journal Article
In: Computer Graphics Forum, vol. 36, no. 2, pp. 295–309, 2017, ISSN: 01677055.
@article{fyffe_multi-view_2017,
title = {Multi-View Stereo on Consistent Face Topology},
author = {G. Fyffe and K. Nagano and L. Huynh and S. Saito and J. Busch and A. Jones and H. Li and P. Debevec},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.13127/epdf},
doi = {10.1111/cgf.13127},
issn = {01677055},
year = {2017},
date = {2017-05-01},
journal = {Computer Graphics Forum},
volume = {36},
number = {2},
pages = {295–309},
abstract = {We present a multi-view stereo reconstruction technique that directly produces a complete high-fidelity head model with consistent facial mesh topology. While existing techniques decouple shape estimation and facial tracking, our framework jointly optimizes for stereo constraints and consistent mesh parameterization. Our method is therefore free from drift and fully parallelizable for dynamic facial performance capture. We produce highly detailed facial geometries with artist-quality UV parameterization, including secondary elements such as eyeballs, mouth pockets, nostrils, and the back of the head. Our approach consists of deforming a common template model to match multi-view input images of the subject, while satisfying cross-view, cross-subject, and cross-pose consistencies using a combination of 2D landmark detection, optical flow, and surface and volumetric Laplacian regularization. Since the flow is never computed between frames, our method is trivially parallelized by processing each frame independently. Accurate rigid head pose is extracted using a PCA-based dimension reduction and denoising scheme. We demonstrate high-fidelity performance capture results with challenging head motion and complex facial expressions around eye and mouth regions. While the quality of our results is on par with the current state-of-the-art, our approach can be fully parallelized, does not suffer from drift, and produces face models with production-quality mesh topologies.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Berkiten, Sema; Halber, Maciej; Solomon, Justin; Ma, Chongyang; Li, Hao; Rusinkiewicz, Szymon
Learning Detail Transfer based on Geometric Features Journal Article
In: Computer Graphics Forum, vol. 36, no. 2, pp. 361–373, 2017, ISSN: 01677055.
@article{berkiten_learning_2017,
title = {Learning Detail Transfer based on Geometric Features},
author = {Sema Berkiten and Maciej Halber and Justin Solomon and Chongyang Ma and Hao Li and Szymon Rusinkiewicz},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.13132/full},
doi = {10.1111/cgf.13132},
issn = {01677055},
year = {2017},
date = {2017-05-01},
journal = {Computer Graphics Forum},
volume = {36},
number = {2},
pages = {361–373},
abstract = {The visual richness of computer graphics applications is frequently limited by the difficulty of obtaining high-quality, detailed 3D models. This paper proposes a method for realistically transferring details (specifically, displacement maps) from existing high-quality 3D models to simple shapes that may be created with easy-to-learn modeling tools. Our key insight is to use metric learning to find a combination of geometric features that successfully predicts detail-map similarities on the source mesh; we use the learned feature combination to drive the detail transfer. The latter uses a variant of multi-resolution non-parametric texture synthesis, augmented by a high-frequency detail transfer step in texture space. We demonstrate that our technique can successfully transfer details among a variety of shapes including furniture and clothing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2016
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
doi = {10.1111/cgf.12837},
issn = {1467-8659},
year = {2016},
date = {2016-05-01},
journal = {Computer Graphics Forum},
abstract = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121–129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Traum, David; Jones, Andrew; Hays, Kia; Maio, Heather; Alexander, Oleg; Artstein, Ron; Debevec, Paul; Gainer, Alesia; Georgila, Kallirroi; Haase, Kathleen; Jungblut, Karen; Leuski, Anton; Smith, Stephen; Swartout, William
New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling Book Section
In: Interactive Storytelling, vol. 9445, pp. 269–281, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27035-7 978-3-319-27036-4.
@incollection{traum_new_2015,
title = {New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling},
author = {David Traum and Andrew Jones and Kia Hays and Heather Maio and Oleg Alexander and Ron Artstein and Paul Debevec and Alesia Gainer and Kallirroi Georgila and Kathleen Haase and Karen Jungblut and Anton Leuski and Stephen Smith and William Swartout},
url = {http://link.springer.com/10.1007/978-3-319-27036-4_26},
isbn = {978-3-319-27035-7 978-3-319-27036-4},
year = {2015},
date = {2015-12-01},
booktitle = {Interactive Storytelling},
volume = {9445},
pages = {269–281},
publisher = {Springer International Publishing},
address = {Copenhagen, Denmark},
abstract = {We describe a digital system that allows people to have an interactive conversation with a human storyteller (a Holocaust survivor) who has recorded a number of dialogue contributions, including many compelling narratives of his experiences and thoughts. The goal is to preserve as much as possible of the experience of face-to-face interaction. The survivor's stories, answers to common questions, and testimony are recorded in high fidelity, and then delivered interactively to an audience as responses to spoken questions. People can ask questions and receive answers on a broad range of topics including the survivor's experiences before, after and during the war, his attitudes and philosophy. Evaluation results show that most user questions can be addressed by the system, and that audiences are highly engaged with the resulting interaction.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Graham, Paul; Fyffe, Graham; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1–1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham
Skin Stretch - Simulating Dynamic Skin Microgeometry Proceedings Article
In: ACM SIGGRAPH 2015 Computer Animation Festival, pp. 133, Los Angeles, CA, 2015.
@inproceedings{nagano_skin_2015-1,
title = {Skin Stretch - Simulating Dynamic Skin Microgeometry},
author = {Koki Nagano and Graham Fyffe},
url = {http://ict.usc.edu/pubs/Skin%20Stretch%20-%20Simulating%20Dynamic%20Skin%20Microgeometry.pdf},
doi = {10.1145/2766894},
year = {2015},
date = {2015-08-01},
booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
volume = {34},
number = {4},
pages = {133},
address = {Los Angeles, CA},
abstract = {This demonstration of the effects of skin microstructure deformation on high-resolution dynamic facial rendering features state-of-the-art skin microstructure simulation, facial scanning, and rendering. Facial animations made with the technique show more realistic and expressive skin under facial expressions.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Metallo, Adam; Rossi, Vincent; Blundell, Jonathan; Waibel, Günter; Graham, Paul; Fyffe, Graham; Yu, Xueming; Debevec, Paul
Scanning and printing a 3D portrait of president Barack Obama Proceedings Article
In: SIGGRAPH 2015: Studio, pp. 19, ACM, Los Angeles, CA, 2015.
@inproceedings{metallo_scanning_2015,
title = {Scanning and printing a 3D portrait of president Barack Obama},
author = {Adam Metallo and Vincent Rossi and Jonathan Blundell and Günter Waibel and Paul Graham and Graham Fyffe and Xueming Yu and Paul Debevec},
url = {http://ict.usc.edu/pubs/Scanning%20and%20Printing%20a%203D%20Portrait%20of%20President%20Barack%20Obama.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015: Studio},
pages = {19},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {On June 9th, 2014, we traveled to the State Dining Room of The White House to create a 3D Portrait of President Barack Obama using state-of-the-art 3D scanning and printing technology, producing the modern equivalent of the plaster life masks of President Lincoln from the 1860's.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Xueming; Wang, Shanhe; Busch, Jay; Phan, Thai; McSheery, Tracy; Bolas, Mark; Debevec, Paul
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Posters, pp. 94, ACM, Los Angeles, CA, 2015.
@inproceedings{yu_virtual_2015,
title = {Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking},
author = {Xueming Yu and Shanhe Wang and Jay Busch and Thai Phan and Tracy McSheery and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Posters},
pages = {94},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focussed, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1–1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high angular density over a wide field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore "time-offset" interactions with recorded 3D human subjects.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbiç, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 07300301.
@article{nagano_skin_2015,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbiç and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {07300301},
year = {2015},
date = {2015-07-01},
booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1–10},
address = {Los Angeles, CA},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
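The entry above synthesizes microstructure deformation by blurring a neutral displacement map where the skin stretches and sharpening it where the skin compresses. As a rough illustration only, the following Python sketch applies an isotropic Gaussian blur or unsharp mask driven by a scalar strain value; the strain-to-filter mapping and gains are assumptions of this example, not the paper's measured, anisotropic, per-texel model.

# Minimal sketch of strain-driven displacement-map filtering.
# Assumes: disp is a 2D float displacement map, strain > 0 means stretch,
# strain < 0 means compression. Filter gains are illustrative only.
import numpy as np
from scipy.ndimage import gaussian_filter

def deform_displacement(disp, strain, k_blur=4.0, k_sharp=2.0):
    """Blur the displacement map under stretch, sharpen it under compression."""
    if strain >= 0.0:
        # Stretching smooths the skin: low-pass the displacement map.
        sigma = k_blur * strain
        return gaussian_filter(disp, sigma=sigma) if sigma > 0 else disp
    # Compression roughens the skin: unsharp mask amplifies high frequencies.
    amount = k_sharp * (-strain)
    blurred = gaussian_filter(disp, sigma=1.0)
    return disp + amount * (disp - blurred)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    neutral = gaussian_filter(rng.standard_normal((256, 256)), 1.5)
    stretched = deform_displacement(neutral, strain=0.2)
    compressed = deform_displacement(neutral, strain=-0.2)
    print(neutral.std(), stretched.std(), compressed.std())

A faithful implementation would instead drive direction-dependent kernels per texel from the tabulated normal-distribution measurements and evaluate them on the GPU, as the abstract describes.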
Saito, Shunsuke; Huang, Zeng; Natsume, Ryota; Morishima, Shigeo; Kanazawa, Angjoo; Li, Hao
PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization Journal Article
In: arXiv:1905.05172 [cs], 2019.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{saito_pifu_2015,
title = {PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization},
author = {Shunsuke Saito and Zeng Huang and Ryota Natsume and Shigeo Morishima and Angjoo Kanazawa and Hao Li},
url = {http://arxiv.org/abs/1905.05172},
year = {2019},
date = {2019-05-01},
journal = {arXiv:1905.05172 [cs]},
abstract = {We introduce Pixel-aligned Implicit Function (PIFu), a highly effective implicit representation that locally aligns pixels of 2D images with the global context of their corresponding 3D object. Using PIFu, we propose an end-to-end deep learning method for digitizing highly detailed clothed humans that can infer both 3D surface and texture from a single image, and optionally, multiple input images. Highly intricate shapes, such as hairstyles, clothing, as well as their variations and deformations can be digitized in a unified way. Compared to existing representations used for 3D deep learning, PIFu can produce high-resolution surfaces including largely unseen regions such as the back of a person. In particular, it is memory efficient unlike the voxel representation, can handle arbitrary topology, and the resulting surface is spatially aligned with the input image. Furthermore, while previous techniques are designed to process either a single image or multiple views, PIFu extends naturally to arbitrary number of views. We demonstrate high-resolution and robust reconstructions on real world images from the DeepFashion dataset, which contains a variety of challenging clothing types. Our method achieves state-of-the-art performance on a public benchmark and outperforms the prior work for clothed human digitization from a single image.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Fyffe, Graham; Debevec, Paul
Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination Proceedings Article
In: Proceedings of ICCP 2015, pp. 1–10, IEEE, Houston, Texas, 2015.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{fyffe_single-shot_2015,
title = {Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination},
author = {Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Single-Shot%20Reflectance%20Measurement%20from%20Polarized%20Color%20Gradient%20Illumination.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICCP 2015},
pages = {1–10},
publisher = {IEEE},
address = {Houston, Texas},
abstract = {We present a method for acquiring the per-pixel diffuse albedo, specular albedo, and surface normal maps of a subject at a single instant in time. The method is single shot, requiring no optical flow, and per-pixel, making no assumptions regarding albedo statistics or surface connectivity. We photograph the subject inside a spherical illumination device emitting a static lighting pattern of vertically polarized RGB color gradients aligned with the XYZ axes, and horizontally polarized RGB color gradients inversely aligned with the XYZ axes. We capture simultaneous photographs using one of two possible setups: a single view setup using a coaxially aligned camera pair with a polarizing beam splitter, and a multi-view stereo setup with different orientations of linear polarizing filters placed on the cameras, enabling high-quality geometry reconstruction. From this lighting we derive full-color diffuse albedo, single-channel specular albedo suitable for dielectric materials, and polarization-preserving surface normals which are free of corruption from subsurface scattering. We provide simple formulae to estimate the diffuse albedo, specular albedo, and surface normal maps in the single-view and multi-view cases and show error bounds which are small for many common subjects including faces.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Building a Life-Size Automultiscopic Display Using Consumer Hardware Proceedings Article
In: Proceedings of GPU Technology Conference, San Jose, CA, 2015.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{jones_building_2015,
title = {Building a Life-Size Automultiscopic Display Using Consumer Hardware},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Building%20a%20Life-Size%20Automultiscopic%20Display%20Using%20Consumer%20Hardware.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of GPU Technology Conference},
address = {San Jose, CA},
abstract = {Automultiscopic displays allow multiple users to experience 3D content without the hassle of special glasses or head gear. Such displays generate many simultaneous images with high-angular density, so that each eye perceives a distinct and different view. This presents a unique challenge for content acquisition and rendering. In this talk, we explain how to build an automultiscopic display using off-the-shelf projectors, video-splitters, and graphics cards. We also present a GPU-based algorithm for rendering a large number of views from a sparse array of video cameras.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC, Virtual Humans
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134–134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimating it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2014
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Debevec, Paul
Driving High-Resolution Facial Scans with Video Performance Capture Journal Article
In: ACM Transactions on Graphics (TOG), vol. 34, no. 1, pp. 1–13, 2014.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{fyffe_driving_2014,
title = {Driving High-Resolution Facial Scans with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Scans%20with%20Video%20Performance%20Capture.pdf},
year = {2014},
date = {2014-11-01},
journal = {ACM Transactions on Graphics (TOG)},
volume = {34},
number = {1},
pages = {1–13},
abstract = {We present a process for rendering a realistic facial performance with control of viewpoint and illumination. The performance is based on one or more high-quality geometry and reflectance scans of an actor in static poses, driven by one or more video streams of a performance. We compute optical flow correspondences between neighboring video frames, and a sparse set of correspondences between static scans and video frames. The latter are made possible by leveraging the relightability of the static 3D scans to match the viewpoint(s) and appearance of the actor in videos taken in arbitrary environments. As optical flow tends to compute proper correspondence for some areas but not others, we also compute a smoothed, per-pixel confidence map for every computed flow, based on normalized cross-correlation. These flows and their confidences yield a set of weighted triangulation constraints among the static poses and the frames of a performance. Given a single artist-prepared face mesh for one static pose, we optimally combine the weighted triangulation constraints, along with a shape regularization term, into a consistent 3D geometry solution over the entire performance that is drift free by construction. In contrast to previous work, even partial correspondences contribute to drift minimization, for example, where a successful match is found in the eye region but not the mouth. Our shape regularization employs a differential shape term based on a spatially varying blend of the differential shapes of the static poses and neighboring dynamic poses, weighted by the associated flow confidences. These weights also permit dynamic reflectance maps to be produced for the performance by blending the static scan maps. Finally, as the geometry and maps are represented on a consistent artist-friendly mesh, we render the resulting high-quality animated face geometry and animated reflectance maps using standard rendering tools.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
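The abstract above relies on a smoothed, per-pixel confidence map for each optical flow, based on normalized cross-correlation. A minimal sketch of such a confidence map follows; the bilinear warp, window size, and smoothing parameters are illustrative assumptions, not the authors' exact formulation.

# Sketch of a per-pixel flow-confidence map via windowed normalized
# cross-correlation between a frame and a flow-warped neighbor.
import numpy as np
from scipy.ndimage import uniform_filter, map_coordinates, gaussian_filter

def warp(img, flow):
    """Warp img by a (H, W, 2) flow field (dy, dx) using bilinear sampling."""
    h, w = img.shape
    yy, xx = np.mgrid[0:h, 0:w].astype(np.float64)
    coords = np.stack([yy + flow[..., 0], xx + flow[..., 1]])
    return map_coordinates(img, coords, order=1, mode="nearest")

def ncc_confidence(a, b_warped, win=7, smooth_sigma=2.0):
    """Windowed NCC between a and b_warped, smoothed into a confidence map."""
    mean_a = uniform_filter(a, win)
    mean_b = uniform_filter(b_warped, win)
    da, db = a - mean_a, b_warped - mean_b
    cov = uniform_filter(da * db, win)
    var_a = uniform_filter(da * da, win)
    var_b = uniform_filter(db * db, win)
    ncc = cov / np.sqrt(var_a * var_b + 1e-8)
    # Clamp to [0, 1] and smooth, so poorly matched regions get low weight.
    return gaussian_filter(np.clip(ncc, 0.0, 1.0), smooth_sigma)

In the paper these confidences weight the triangulation constraints, so a good match in one facial region still contributes even when another region fails.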
Nagano, Koki; Alexander, Oleg; Barbic, Jernej; Debevec, Paul
Measurement and Modeling of Microfacet Distributions under Deformation Proceedings Article
In: ACM SIGGRAPH 2014 Talks, ACM, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2960-6.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{nagano_measurement_2014,
title = {Measurement and Modeling of Microfacet Distributions under Deformation},
author = {Koki Nagano and Oleg Alexander and Jernej Barbic and Paul Debevec},
url = {http://ict.usc.edu/pubs/Measurement%20and%20Modeling%20of%20Microfacet%20Distribution%20under%20Deformation%20(abstract%20for%20talk).pdf},
doi = {10.1145/2614106.2614124},
isbn = {978-1-4503-2960-6},
year = {2014},
date = {2014-08-01},
booktitle = {ACM SIGGRAPH 2014 Talks},
publisher = {ACM},
address = {Vancouver, British Columbia, Canada},
abstract = {We endeavor to model dynamic microfacet distributions of rough surfaces such as skin to simulate the changes in surface BRDF under stretching and compression. We begin by measuring microfacet distributions at 5-micron scale of several surface patches under controlled deformation. Generally speaking, rough surfaces become flatter and thus shinier as they are pulled tighter, and become rougher under compression. From this data, we build a model of how surface reflectance changes as the material deforms. We then simulate dynamic surface reflectance by modifying the anisotropic roughness parameters of a microfacet distribution model in accordance with animated surface deformations. Furthermore, we directly render such dynamic appearance by driving dynamic micro geometries to demonstrate how they influence the meso-scale surface reflectance.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man” Proceedings Article
In: SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques, Vancouver, Canada, 2014.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{jones_creating_2014,
title = {Creating a life-sized automultiscopic Morgan Spurlock for CNN's “Inside Man”},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Creating%20a%20life-sized%20automulitscopic%20Morgan%20Spurlock%20for%20CNNs%20%e2%80%9cInside%20Man%e2%80%9d%20(abstract).pdf},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Vancouver, Canada},
abstract = {We present a system for capturing and rendering life-size 3D human subjects on an automultiscopic display. Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
von der Pahlen, Javier; Jimenez, Jorge; Danvoye, Etienne; Debevec, Paul; Fyffe, Graham; Alexander, Oleg
Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters Proceedings Article
In: SIGGRAPH '14 ACM SIGGRAPH 2014 Courses, pp. 1–384, ACM Press, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2962-0.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{von_der_pahlen_digital_2014,
title = {Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters},
author = {Javier von der Pahlen and Jorge Jimenez and Etienne Danvoye and Paul Debevec and Graham Fyffe and Oleg Alexander},
url = {http://ict.usc.edu/pubs/Digial%20Ira%20and%20Beyond%20-%20Creating%20Photoreal%20Real-Time%20Digital%20Characters%20(course%20notes).pdf},
doi = {10.1145/2614028.2615407},
isbn = {978-1-4503-2962-0},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH '14 ACM SIGGRAPH 2014 Courses},
pages = {1–384},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {This course explains a complete process for creating next-generation realtime digital human characters, using the Digital Ira collaboration between USC ICT and Activision as an example, covering highres facial scanning, blendshape rigging, video-based performance capture, animation compression, realtime skin and eye shading, hair, latest results, and future directions.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
Artstein, Ron; Smith, Stephen; Traum, David; Alexander, Oleg; Leuski, Anton; Jones, Andrew; Georgila, Kallirroi; Debevec, Paul; Swartout, William; Maio, Heather
Time-offset Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of IUI 2014, pp. 163–168, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2184-6.
Abstract | Links | BibTeX | Tags: Graphics, Virtual Humans
@inproceedings{artstein_time-offset_2014,
title = {Time-offset Interaction with a Holocaust Survivor},
author = {Ron Artstein and Stephen Smith and David Traum and Oleg Alexander and Anton Leuski and Andrew Jones and Kallirroi Georgila and Paul Debevec and William Swartout and Heather Maio},
url = {http://ict.usc.edu/pubs/Time-Offset%20Interaction%20with%20a%20Holocaust%20Survivor.pdf},
doi = {10.1145/2557500.2557540},
isbn = {978-1-4503-2184-6},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of IUI 2014},
pages = {163–168},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {Time-offset interaction is a new technology that allows for two-way communication with a person who is not available for conversation in real time: a large set of statements are prepared in advance, and users access these statements through natural conversation that mimics face-to-face interaction. Conversational reactions to user questions are retrieved through a statistical classifier, using technology that is similar to previous interactive systems with synthetic characters; however, all of the retrieved utterances are genuine statements by a real person. Recordings of answers, listening and idle behaviors, and blending techniques are used to create a persistent visual image of the person throughout the interaction. A proof-of-concept has been implemented using the likeness of Pinchas Gutter, a Holocaust survivor, enabling short conversations about his family, his religious views, and resistance. This proof-of-concept has been shown to dozens of people, from school children to Holocaust scholars, with many commenting on the impact of the experience and potential for this kind of interface.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2013
Nagano, Koki; Jones, Andrew; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
An Autostereoscopic Projector Array Optimized for 3D Facial Display Proceedings Article
In: SIGGRAPH 2013 Emerging Technologies, 2013.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{nagano_autostereoscopic_2013,
title = {An Autostereoscopic Projector Array Optimized for 3D Facial Display},
author = {Koki Nagano and Andrew Jones and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Autostereoscopic%20Projector%20Array%20Optimized%20for%203D%20Facial%20Display%20.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Emerging Technologies},
abstract = {Video projectors are rapidly shrinking in size, power consumption, and cost. Such projectors provide unprecedented flexibility to stack, arrange, and aim pixels without the need for moving parts. This dense projector display is optimized in size and resolution to display an autostereoscopic life-sized 3D human face. It utilizes 72 Texas Instruments PICO projectors to illuminate a 30 cm x 30 cm anisotropic screen with a wide 110-degree field of view. The demonstration includes both live scanning of subjects and virtual animated characters.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Graham, Paul; Nagano, Koki; Busch, Jay; Debevec, Paul
Driving High-Resolution Facial Blendshapes with Video Performance Capture Proceedings Article
In: SIGGRAPH, Anaheim, CA, 2013.
Links | BibTeX | Tags: Graphics, UARC
@inproceedings{fyffe_driving_2013,
title = {Driving High-Resolution Facial Blendshapes with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Graham and Koki Nagano and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Blendshapes%20with%20Video%20Performance.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH},
address = {Anaheim, CA},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Alexander, Oleg; Busch, Jay; Graham, Paul; Tunwattanapong, Borom; Jones, Andrew; Nagano, Koki; Ichikari, Ryosuke; Debevec, Paul; Fyffe, Graham
Digital Ira: High-Resolution Facial Performance Playback Proceedings Article
In: SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques, Anaheim, CA, 2013.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{alexander_digital_2013,
title = {Digital Ira: High-Resolution Facial Performance Playback},
author = {Oleg Alexander and Jay Busch and Paul Graham and Borom Tunwattanapong and Andrew Jones and Koki Nagano and Ryosuke Ichikari and Paul Debevec and Graham Fyffe},
url = {http://gl.ict.usc.edu/Research/DigitalIra/},
doi = {10.1145/2503385.2503387},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH 2013 Real-Time Live! The 40th International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Anaheim, CA},
abstract = {In this collaboration between Activision and USC ICT, we tried to create a real-time, photoreal digital human character which could be seen from any viewpoint, any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we needed this to run in a game-ready production pipeline. To achieve this, we scanned the actor in thirty high-resolution expressions using the USC ICT's new Light Stage X system [Ghosh et al. SIGGRAPH Asia 2011] and chose eight expressions for the real-time performance rendering. To record the performance, we shot multi-view 30fps video of the actor performing improvised lines using the same multi-camera rig. We used a new tool called Vuvuzela to interactively and precisely correspond all expression (u,v)'s to the neutral expression, which was retopologized to an artist mesh. Our new offline animation solver works by creating a performance graph representing dense GPU optical flow between the video frames and the eight expressions. This graph gets pruned by analyzing the correlation between the video frames and the expression scans over twelve facial regions. The algorithm then computes dense optical flow and 3D triangulation yielding per-frame spatially varying blendshape weights approximating the performance.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Tunwattanapong, Borom; Fyffe, Graham; Graham, Paul; Busch, Jay; Yu, Xueming; Ghosh, Abhijeet; Debevec, Paul
Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination Journal Article
In: ACM Transactions on Graphics, vol. 32, no. 4, 2013, ISSN: 07300301.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{tunwattanapong_acquiring_2013,
title = {Acquiring Reflectance and Shape from Continuous Spherical Harmonic Illumination},
author = {Borom Tunwattanapong and Graham Fyffe and Paul Graham and Jay Busch and Xueming Yu and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Acquiring%20Re%ef%ac%82ectance%20and%20Shape%20from%20Continuous%20Spherical%20Harmonic%20Illumination.pdf},
doi = {10.1145/2461912.2461944},
issn = {07300301},
year = {2013},
date = {2013-07-01},
journal = {ACM Transactions on Graphics},
volume = {32},
number = {4},
abstract = {We present a novel technique for acquiring the geometry and spatially-varying reflectance properties of 3D objects by observing them under continuous spherical harmonic illumination conditions. The technique is general enough to characterize either entirely specular or entirely diffuse materials, or any varying combination across the surface of the object. We employ a novel computational illumination setup consisting of a rotating arc of controllable LEDs which sweep out programmable spheres of incident illumination during 1-second exposures. We illuminate the object with a succession of spherical harmonic illumination conditions, as well as photographed environmental lighting for validation. From the response of the object to the harmonics, we can separate diffuse and specular reflections, estimate world-space diffuse and specular normals, and compute anisotropic roughness parameters for each view of the object. We then use the maps of both diffuse and specular reflectance to form correspondences in a multiview stereo algorithm, which allows even highly specular surfaces to be corresponded across views. The algorithm yields a complete 3D model and a set of merged reflectance maps. We use this technique to digitize the shape and reflectance of a variety of objects difficult to acquire with other techniques and present validation renderings which match well to photographs in similar lighting.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Alexander, Oleg; Fyffe, Graham; Busch, Jay; Yu, Xueming; Ichikari, Ryosuke; Jones, Andrew; Debevec, Paul; Jimenez, Jorge; Danvoye, Etienne; Antoniazzi, Bernardo; Eheler, Mike; Kysela, Zbynek; von der Pahlen, Javier
Digital Ira: Creating a Real-Time Photoreal Digital Actor Proceedings Article
In: SIGGRAPH Real Time Live!, Anaheim, CA, 2013, ISBN: 978-1-4503-2342-0.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{alexander_digital_2013-1,
title = {Digital Ira: Creating a Real-Time Photoreal Digital Actor},
author = {Oleg Alexander and Graham Fyffe and Jay Busch and Xueming Yu and Ryosuke Ichikari and Andrew Jones and Paul Debevec and Jorge Jimenez and Etienne Danvoye and Bernardo Antoniazzi and Mike Eheler and Zbynek Kysela and Javier von der Pahlen},
url = {http://dl.acm.org/citation.cfm?doid=2503385.2503387},
doi = {10.1145/2503385.2503387},
isbn = {978-1-4503-2342-0},
year = {2013},
date = {2013-07-01},
booktitle = {SIGGRAPH Real Time Live!},
address = {Anaheim, CA},
abstract = {In 2008, the "Digital Emily" project [Alexander et al. 2009] showed how a set of high-resolution facial expressions scanned in a light stage could be rigged into a real-time photoreal digital character and driven with video-based facial animation techniques. However, Digital Emily was rendered offline, involved just the front of the face, and was never seen in a tight closeup. In this collaboration between Activision and USC ICT shown at SIGGRAPH 2013's Real-Time Live venue, we endeavoured to create a real-time, photoreal digital human character which could be seen from any viewpoint, in any lighting, and could perform realistically from video performance capture even in a tight closeup. In addition, we wanted this to run in a real-time game-ready production pipeline, ultimately achieving 180 frames per second for a full-screen character on a two-year old graphics card.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: EUROGRAPHICS, Girona, Spain, 2013.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{graham_measurement-based_2013,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2013},
date = {2013-05-01},
booktitle = {EUROGRAPHICS},
address = {Girona, Spain},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2012
Debevec, Paul
The Light Stages and Their Applications to Photoreal Digital Actors Proceedings Article
In: SIGGRAPH Asia, Singapore, 2012.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{debevec_light_2012,
title = {The Light Stages and Their Applications to Photoreal Digital Actors},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/The%20Light%20Stages%20and%20Their%20Applications%20to%20Photoreal%20Digital%20Actors.pdf},
year = {2012},
date = {2012-11-01},
booktitle = {SIGGRAPH Asia},
address = {Singapore},
abstract = {The Light Stage systems built at UC Berkeley and USC ICT have enabled a variety of facial scanning and reflectance measurement techniques that have been explored in several research papers and used in various commercial applications. This short paper presents the evolutionary history of the Light Stage Systems and some of the techniques and applications they have enabled.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2012, 2012.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@techreport{graham_measurement-based_2012,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2012.pdf},
year = {2012},
date = {2012-11-01},
number = {ICT TR 01 2012},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a technique for generating microstructure-level facial geometry by augmenting a mesostructure-level facial scan with detail synthesized from a set of exemplar skin patches scanned at much higher resolution. We use constrained texture synthesis based on image analogies to increase the resolution of the facial scan in a way that is consistent with the scanned mesostructure. We digitize the exemplar patches with a polarization-based computational illumination technique which considers specular reflection and single scattering. The recorded microstructure patches can be used to synthesize full-facial microstructure detail for either the same subject or a different subject. We show that the technique allows for greater realism in facial renderings including more accurate reproduction of skin’s specular roughness and anisotropic reflection effects.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {techreport}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: ACM SIGGRAPH 2012 Talks (SIGGRAPH '12), Los Angeles, CA, 2012.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{graham_measurement-based_2012-1,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/A%20Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks (SIGGRAPH '12)},
address = {Los Angeles, CA},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 micron resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Yufeng; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), 2012.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{zhu_estimating_2012,
title = {Estimating Diffusion Parameters from Polarized Spherical Gradient Illumination},
author = {Yufeng Zhu and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Estimating%20Diffusion%20Parameters%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
abstract = {Accurately modeling and reproducing the appearance of real-world materials is crucial for the production of photoreal imagery of digital scenes and subjects. The appearance of many common materials is the result of subsurface light transport that gives rise to the characteristic “soft” appearance and the unique coloring of such materials. Jensen et al. [2001] introduced the dipole-diffusion approximation to efficiently model isotropic subsurface light transport. The scattering parameters needed to drive the dipole-diffusion approximation are typically estimated by illuminating a homogeneous surface patch with a collimated beam of light, or in the case of spatially varying translucent materials with a dense set of structured light patterns. A disadvantage of most existing techniques is that acquisition time is traded off with spatial density of the scattering parameters.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Graham, Paul; Busch, Jay; Bolas, Mark
A Cell Phone Based Platform for Facial Performance Capture Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
Links | BibTeX | Tags: Graphics, MxR
@inproceedings{debevec_cell_2012,
title = {A Cell Phone Based Platform for Facial Performance Capture},
author = {Paul Debevec and Paul Graham and Jay Busch and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Cell%20Phone%20Based%20Platform%20for%20Facial%20Performance%20Capture.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Busch, Jay; Bolas, Mark; Debevec, Paul
A Single-Shot Light Probe Proceedings Article
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{graham_single-shot_2012,
title = {A Single-Shot Light Probe},
author = {Paul Graham and Jay Busch and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Single-Shot%20Light%20Probe.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
abstract = {We demonstrate a novel light probe which can estimate the full dynamic range of a scene with multiple bright light sources. It places diffuse strips between mirrored spherical quadrants, effectively co-locating diffuse and mirrored probes to record the full dynamic range of illumination in a single exposure. From this image, we estimate the intensity of multiple saturated light sources by solving a linear system.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
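The light-probe abstract above ends by estimating the intensities of saturated light sources from the diffuse strips by solving a linear system. One plausible reading of that step, sketched here under a Lambertian response model with non-negative least squares (both are assumptions of this example, not necessarily the authors' formulation), is:

# Illustrative sketch of recovering saturated light-source intensities from
# diffuse-probe pixels by solving a linear system.
import numpy as np
from scipy.optimize import nnls

def estimate_light_intensities(diffuse_pixels, pixel_normals, light_dirs):
    """diffuse_pixels: (M,) observed values on the diffuse strips.
    pixel_normals:  (M, 3) unit normals of those probe points.
    light_dirs:     (K, 3) unit directions toward the saturated sources.
    Returns the K estimated source intensities."""
    # Lambertian response matrix: A[i, j] = max(n_i . l_j, 0)
    A = np.clip(pixel_normals @ light_dirs.T, 0.0, None)
    intensities, _ = nnls(A, diffuse_pixels)
    return intensities

Because the diffuse strips never saturate, each unclipped pixel constrains a weighted sum of the unknown source intensities, which is exactly the overdetermined linear system solved here.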
Meseth, Jan; Hempel, Shawn; Weidlich, Andrea; Fyffe, Graham; Miller, Craig; Carroll, Paul; Debevec, Paul; Fyffe, Lynn
Improved Linear-Light-Source Material Reflectance Scanning Proceedings Article
In: ACM SIGGRAPH 2012 Talks, 2012.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{meseth_improved_2012,
title = {Improved Linear-Light-Source Material Reflectance Scanning},
author = {Jan Meseth and Shawn Hempel and Andrea Weidlich and Graham Fyffe and Craig Miller and Paul Carroll and Paul Debevec and Lynn Fyffe},
url = {http://ict.usc.edu/pubs/Improved%20Linear-Light-Source%20Material%20Reflectance%20Scanning.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM SIGGRAPH 2012 Talks},
abstract = {We improve the resolution, accuracy, and efficiency of Linear Light Source (LLS) Reflectometry with several acquisition setup and data processing improvements, allowing spatially-varying reflectance parameters of complex materials to be recorded with unprecedented accuracy and efficiency.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham
High Fidelity Facial Hair Capture Technical Report
University of Southern California Institute for Creative Technologies, Playa Vista, CA, no. ICT TR 02 2012, 2012.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{fyffe_high_2012-1,
title = {High Fidelity Facial Hair Capture},
author = {Graham Fyffe},
url = {https://apps.dtic.mil/sti/trecms/pdf/AD1170996.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {SIGGRAPH},
number = {ICT TR 02 2012},
address = {Playa Vista, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We propose an extension to multi-view face capture that reconstructs high quality facial hair automatically. Multi-view stereo is well known for producing high quality smooth surfaces and meshes, but fails on fine structure such as hair. We exploit this failure, and automatically detect the hairs on a face by careful analysis of the pixel reconstruction error of the multi-view stereo result. Central to our work is a novel stereo matching cost function, which we call equalized cross correlation, that properly accounts for both camera sensor noise and pixel sampling variance. In contrast to previous works that treat hair modeling as a synthesis problem based on image cues, we reconstruct facial hair to explain the same high-resolution input photographs used for face reconstruction, producing a result with higher fidelity to the input photographs.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Jurik, Joel; Burnett, Thomas; Klug, Michael; Debevec, Paul
Geometry-Corrected Light Field Rendering for Creating a Holographic Stereogram Proceedings Article
In: CVPR Workshop for Computational Cameras and Displays, Providence, RI, 2012.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{jurik_geometry-corrected_2012,
title = {Geometry-Corrected Light Field Rendering for Creating a Holographic Stereogram},
author = {Joel Jurik and Thomas Burnett and Michael Klug and Paul Debevec},
url = {http://ict.usc.edu/pubs/Geometry-Corrected%20Light%20Field%20Rendering%20for%20Creating%20a%20Holographic%20Stereogram.pdf},
year = {2012},
date = {2012-06-01},
booktitle = {CVPR Workshop for Computational Cameras and Displays},
address = {Providence, RI},
abstract = {We present a technique to record and process a light field of an object in order to produce a printed holographic stereogram. We use a geometry correction process to maximize the depth of field and depth-dependent surface detail even when the array of viewpoints comprising the light field is coarsely sampled with respect to the angular resolution of the printed hologram. We capture the light field data of an object with a digital still camera attached to a 2D translation stage, and generate hogels (holographic elements) for printing by reprojecting the light field onto a photogrammetrically recovered model of the object and querying the relevant rays to be produced by the hologram with respect to this geometry. This results in a significantly clearer image of detail at different depths in the printed holographic stereogram.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Wang, Yi-Hua; Fyffe, Graham; Chen, Bing-Yu; Debevec, Paul
A blendshape model that incorporates physical interaction Journal Article
In: Computer Animation and Virtual Worlds, vol. 23, no. 3-4, pp. 235–243, 2012.
Abstract | Links | BibTeX | Tags: Graphics
@article{ma_blendshape_2012,
title = {A blendshape model that incorporates physical interaction},
author = {Wan-Chun Ma and Yi-Hua Wang and Graham Fyffe and Bing-Yu Chen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20blendshape%20model%20that%20incorporates%20physical%20interaction-2.pdf},
doi = {10.1002/cav.1441},
year = {2012},
date = {2012-05-01},
journal = {Computer Animation and Virtual Worlds},
volume = {23},
number = {3-4},
pages = {235–243},
abstract = {The linear blendshape technique has been intensively used for computer animation and games because of its simplicity and effectiveness. However, it cannot describe rotational deformations and deformations because of self collision or scene interaction. In this paper, we present a new technique to address these two major limitations by introducing physical-based simulation to blendshapes. The proposed technique begins by constructing a mass–spring system for each blendshape target. Each system is initialized in its steady state by setting the rest length of each spring as the edge length of the corresponding target. To begin shape interpolation, we linearly interpolate the rest lengths of the springs according to a given interpolation factor α ∈ [0,1]. The interpolated shape is then generated by computing the equilibrium of the mass–spring system with the interpolated rest lengths. Results from our technique show physically plausible deformations even in the case of large rotations between blendshape targets. In addition, the new blendshape model is able to interact with other scene elements by introducing collision detection and handling to the mass–spring system.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
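The blendshape model above interpolates spring rest lengths between targets and takes the equilibrium of the resulting mass-spring system as the in-between shape. A minimal sketch of that idea follows; the explicit relaxation solver, unit stiffness, and step size are illustrative choices, and the paper's collision detection and handling are not reproduced.

# Minimal sketch of rest-length-interpolated shape blending: springs take
# rest lengths interpolated between two targets, and the intermediate shape
# is the (approximate) equilibrium of that system.
import numpy as np

def blend_by_rest_length(verts_a, verts_b, edges, alpha, iters=2000, step=0.1):
    """verts_a, verts_b: (N, 3) corresponding vertex positions of two targets.
    edges: (E, 2) integer vertex indices of the springs.
    alpha: interpolation factor in [0, 1]."""
    i, j = edges[:, 0], edges[:, 1]
    rest_a = np.linalg.norm(verts_a[i] - verts_a[j], axis=1)
    rest_b = np.linalg.norm(verts_b[i] - verts_b[j], axis=1)
    rest = (1.0 - alpha) * rest_a + alpha * rest_b   # interpolated rest lengths

    x = (1.0 - alpha) * verts_a + alpha * verts_b    # warm start: linear blend
    for _ in range(iters):
        d = x[i] - x[j]
        length = np.linalg.norm(d, axis=1, keepdims=True) + 1e-12
        # Spring force proportional to deviation from the interpolated rest length.
        f = (rest[:, None] - length) * (d / length)
        force = np.zeros_like(x)
        np.add.at(force, i, f)
        np.add.at(force, j, -f)
        x += step * force
    return x

Unlike a purely linear blend of vertex positions, this equilibrium-based blend can bend through large rotations between targets, which is the behavior the abstract highlights.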
Guarnera, Giuseppe Claudio; Peers, Pieter; Debevec, Paul; Ghosh, Abhijeet
Estimating Surface Normals from Spherical Stokes Reflectance Fields Proceedings Article
In: ECCV Workshop on Color and Photometry in Computer Vision (CPCV), Firenze, Italy, 2012.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{guarnera_estimating_2012,
title = {Estimating Surface Normals from Spherical Stokes Reflectance Fields},
author = {Giuseppe Claudio Guarnera and Pieter Peers and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/Estimating%20Surface%20Normals%20from%20Spherical%20Stokes%20Reflectance%20Fields.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {ECCV Workshop on Color and Photometry in Computer Vision (CPCV)},
address = {Firenze, Italy},
abstract = {In this paper we introduce a novel technique for estimating surface normals from the four Stokes polarization parameters of specularly reflected light under a single spherical incident lighting condition that is either unpolarized or circularly polarized. We illustrate the practicality of our technique by estimating surface normals under uncontrolled outdoor illumination from just four observations from a fixed viewpoint.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham
High Fidelity Facial Hair Capture Proceedings Article
In: SIGGRAPH, Playa Vista, CA, 2012.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{fyffe_high_2012,
title = {High Fidelity Facial Hair Capture},
author = {Graham Fyffe},
url = {http://ict.usc.edu/pubs/High%20Fidelity%20Facial%20Hair%20Capture.pdf},
year = {2012},
date = {2012-01-01},
booktitle = {SIGGRAPH},
number = {ICT TR 02 2012},
address = {Playa Vista, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Modeling human hair from photographs is a topic of ongoing interest to the graphics community. Yet, the literature is predominantly concerned with the hair volume on the scalp, and it remains difficult to capture digital characters with interesting facial hair. Recent stereo-vision-based facial capture systems (e.g. [Furukawa and Ponce 2010][Beeler et al. 2010]) are capable of capturing extremely fine facial detail from high resolution photographs, but any facial hair present on the subject is reconstructed as a blobby mass. Prior work in facial hair photo-modeling is based on learned priors and image cues [Herrera et al. ], and does not reconstruct the individual hairs belonging uniquely to the subject. We propose a method for capturing the three dimensional shape of complex, multi-colored facial hair from a small number of photographs taken simultaneously under uniform illumination. The method produces a set of oriented hair particles, suitable for point-based rendering.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
2011
Ma, Wan-Chun; Wang, Yi-Hua; Fyffe, Graham; Barbic, Jernej; Chen, Bing-Yu; Debevec, Paul
A blendshape model that incorporates physical interaction Proceedings Article
In: SIGGRAPH Asia, Hong Kong, 2011, ISBN: 978-1-4503-1137-3.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{ma_blendshape_2011,
title = {A blendshape model that incorporates physical interaction},
author = {Wan-Chun Ma and Yi-Hua Wang and Graham Fyffe and Jernej Barbic and Bing-Yu Chen and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20blendshape%20model%20that%20incorporates%20physical%20interaction.pdf},
doi = {10.1145/2073304.2073343},
isbn = {978-1-4503-1137-3},
year = {2011},
date = {2011-12-01},
booktitle = {SIGGRAPH Asia},
address = {Hong Kong},
abstract = {We present a new technique for physically-plausible shape blending by interpolating the spring rest length parameters of a mass-spring system. This blendshape method begins by constructing two consistent mass-spring systems (i.e., with vertex-wise correspondence and the same topology) for source and target shapes, respectively, and setting the two systems as in their static states. In other words, their edge lengths are equal to the rest lengths of the springs. To create an intermediate pose, we generate a new mass-spring system consistent with the source and target ones and set its rest lengths as linearly interpolated between source and target based on an interpolation factor α ∈ [0, 1]. The new pose is then synthesized by computing the equilibrium given the interpolated rest lengths. In addition, the mass-spring system may interact with other objects in the environment by incorporating collision detection.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Fyffe, Graham; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Debevec, Paul
Multiview Face Capture using Polarized Spherical Gradient Illumination Proceedings Article
In: Proceedings of SIGGRAPH Asia 2011/ACM Trans. on Graphics, 2011.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{ghosh_multiview_2011,
title = {Multiview Face Capture using Polarized Spherical Gradient Illumination},
author = {Abhijeet Ghosh and Graham Fyffe and Borom Tunwattanapong and Jay Busch and Xueming Yu and Paul Debevec},
url = {http://ict.usc.edu/pubs/Multiview%20Face%20Capture%20using%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2011},
date = {2011-12-01},
booktitle = {Proceedings of SIGGRAPH Asia 2011/ACM Trans. on Graphics},
volume = {30(6)},
abstract = {We present a novel process for acquiring detailed facial geometry with high resolution diffuse and specular photometric information from multiple viewpoints using polarized spherical gradient illumination. Key to our method is a new pair of linearly polarized lighting patterns which enables multiview diffuse-specular separation under a given spherical illumination condition from just two photographs. The patterns – one following lines of latitude and one following lines of longitude – allow the use of fixed linear polarizers in front of the cameras, enabling more efficient acquisition of diffuse and specular albedo and normal maps from multiple viewpoints. In a second step, we employ these albedo and normal maps as input to a novel multi-resolution adaptive domain message passing stereo reconstruction algorithm to create high resolution facial geometry. To do this, we formulate the stereo reconstruction from multiple cameras in a commonly parameterized domain for multiview reconstruction. We show competitive results consisting of high-resolution facial geometry with relightable reflectance maps using five DSLR cameras. Our technique scales well for multiview acquisition without requiring specialized camera systems for sensing multiple polarization states.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Fyffe, Graham; Yu, Xueming; Ma, Wan-Chun; Busch, Jay; Ichikari, Ryosuke; Bolas, Mark; Debevec, Paul
Head-mounted Photometric Stereo for Performance Capture Proceedings Article
In: 8th European Conference on Visual Media Production (CVMP 2011), London, UK, 2011.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_head-mounted_2011,
title = {Head-mounted Photometric Stereo for Performance Capture},
author = {Andrew Jones and Graham Fyffe and Xueming Yu and Wan-Chun Ma and Jay Busch and Ryosuke Ichikari and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Head-mounted%20Photometric%20Stereo%20for%20Performance%20Capture.pdf},
year = {2011},
date = {2011-11-01},
booktitle = {8th European Conference on Visual Media Production (CVMP 2011)},
address = {London, UK},
abstract = {Head-mounted cameras are an increasingly important tool for capturing facial performances to drive virtual characters. They provide a fixed, unoccluded view of the face, useful for observing motion capture dots or as input to video analysis. However, the 2D imagery captured with these systems is typically affected by ambient light and generally fails to record subtle 3D shape changes as the face performs. We have developed a system that augments a head-mounted camera with LED-based photometric stereo. The system allows observation of the face independent of the ambient light and generates per-pixel surface normals so that the performance is recorded dynamically in 3D. The resulting data can be used for facial relighting or as better input to machine learning algorithms for driving an animated face.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
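The system above augments a head-mounted camera with LED-based photometric stereo to recover per-pixel surface normals independent of ambient light. A textbook per-pixel Lambertian photometric-stereo solve, assuming calibrated LED light directions (an assumption of this sketch, not a description of the authors' exact pipeline), looks like this:

# Standard per-pixel photometric stereo: solve I = L * (albedo * n) in the
# least-squares sense for every pixel at once.
import numpy as np

def photometric_stereo(images, light_dirs):
    """images: (K, H, W) grayscale frames, one per LED state.
    light_dirs: (K, 3) calibrated unit light directions.
    Returns (H, W, 3) unit normals and an (H, W) albedo map."""
    k, h, w = images.shape
    I = images.reshape(k, -1)                            # (K, H*W)
    G, *_ = np.linalg.lstsq(light_dirs, I, rcond=None)   # (3, H*W), G = albedo * n
    albedo = np.linalg.norm(G, axis=0)
    normals = (G / (albedo + 1e-12)).T.reshape(h, w, 3)
    return normals, albedo.reshape(h, w)

With the LEDs strobed against the ambient illumination, as the abstract describes, the recovered normals capture subtle 3D shape changes that a single 2D video stream misses.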
Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Practical Image-Based Relighting and Editing with Spherical-Harmonics and Local Lights Proceedings Article
In: European Conference on Visual Media and Production (CVMP), 2011.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{tunwattanapong_practical_2011,
title = {Practical Image-Based Relighting and Editing with Spherical-Harmonics and Local Lights},
author = {Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Image%20Based%20Relighting%20and%20Editing%20with%20Spherical%20Harmonics%20and%20Local%20Lights.pdf},
year = {2011},
date = {2011-11-01},
booktitle = {European Conference on Visual Media and Production (CVMP)},
abstract = {We present a practical technique for image-based relighting under environmental illumination which greatly reduces the number of required photographs compared to traditional techniques, while still achieving high quality editable relighting results. The proposed method employs an optimization procedure to combine spherical harmonics, a global lighting basis, with a set of local lights. Our choice of lighting basis captures both low and high frequency components of typical surface reflectance functions while generating close approximations to the ground truth with an order of magnitude less data. This technique benefits the acquisition process by reducing the number of required photographs, while simplifying the modification of reflectance data and enabling artistic lighting edits for post-production effects. Here, we demonstrate two desirable lighting edits, modifying light intensity and angular width, employing the proposed lighting basis.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
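Once per-basis photographs are captured (spherical-harmonic patterns plus local lights in the paper above), relighting reduces to a weighted sum of those basis images, and lighting edits reduce to rescaling individual coefficients. A minimal sketch with hypothetical array names:

import numpy as np

def relight(basis_images, coefficients):
    """Relight a scene from a linear lighting basis.

    basis_images : N x H x W x 3 array, one photograph per lighting basis function
    coefficients : length-N weights, e.g. the target environment map projected
                   onto the same basis (spherical harmonics plus local lights)
    """
    return np.tensordot(coefficients, basis_images, axes=1)  # H x W x 3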
Wilson, Cyrus A.; Alexander, Oleg; Tunwattanapong, Borom; Peers, Pieter; Ghosh, Abhijeet; Busch, Jay; Hartholt, Arno; Debevec, Paul
Facial Cartography: Interactive Scan Correspondence Proceedings Article
In: ACM/Eurographics Symposium on Computer Animation, 2011.
Abstract | Links | BibTeX | Tags: Graphics, Virtual Humans
@inproceedings{wilson_facial_2011,
title = {Facial Cartography: Interactive Scan Correspondence},
author = {Cyrus A. Wilson and Oleg Alexander and Borom Tunwattanapong and Pieter Peers and Abhijeet Ghosh and Jay Busch and Arno Hartholt and Paul Debevec},
url = {http://ict.usc.edu/pubs/Facial%20Cartography-%20Interactive%20Scan%20Correspondence.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {ACM/Eurographics Symposium on Computer Animation},
abstract = {We present a semi-automatic technique for computing surface correspondences between 3D facial scans in different expressions, such that scan data can be mapped into a common domain for facial animation. The technique can accurately correspond high-resolution scans of widely differing expressions – without requiring intermediate pose sequences – such that they can be used, together with reflectance maps, to create high-quality blendshape-based facial animation. We optimize correspondences through a combination of Image, Shape, and Internal forces, as well as Directable forces to allow a user to interactively guide and refine the solution. Key to our method is a novel representation, called an Active Visage, that balances the advantages of both deformable templates and correspondence computation in a 2D canonical domain. We show that our semi-automatic technique achieves more robust results than automated correspondence alone, and is more precise than is practical with unaided manual input.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Fyffe, Graham; Debevec, Paul
Optimized Local Blendshape Mapping for Facial Motion Retargeting Proceedings Article
In: SIGGRAPH 2011, Vancouver, Canada, 2011.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{ma_optimized_2011,
title = {Optimized Local Blendshape Mapping for Facial Motion Retargeting},
author = {Wan-Chun Ma and Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Optimized%20Local%20Blendshape%20Mapping%20for%20Facial%20Motion%20Retargeting.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {SIGGRAPH 2011},
address = {Vancouver, Canada},
abstract = {One of the popular methods for facial motion retargeting is local blendshape mapping [Pighin and Lewis 2006], where each local facial region is controlled by a tracked feature (for example, a vertex in motion capture data). To map a target motion input onto blendshapes, a pose set is chosen for each facial region with minimal retargeting error. However, since the best pose set for each region is chosen independently, the solution likely has unorganized pose sets across the face regions, as shown in Figure 1(b). Therefore, even though every pose set matches the local features, the retargeting result is not guaranteed to be spatially smooth. In addition, previous methods ignored temporal coherence which is key for jitter-free results.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
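The core local-retargeting step, finding blendshape weights for one facial region so that the rig's predicted feature positions match the tracked ones, is in essence a bounded least-squares solve. The sketch below is an illustration only: the matrix and bound choices are assumptions, and it omits the paper's spatially and temporally coherent pose-set selection.

import numpy as np
from scipy.optimize import lsq_linear

def solve_region_weights(B, x):
    """Solve for blendshape weights in one facial region.

    B : M x N matrix; column j stacks the displacements of the region's tracked
        features for blendshape pose j
    x : length-M vector of captured feature displacements for the current frame
    """
    res = lsq_linear(B, x, bounds=(0.0, 1.0))  # keep weights in a plausible range
    return res.x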
Fyffe, Graham; Hawkins, Tim; Watts, Chris; Ma, Wan-Chun; Debevec, Paul
Comprehensive Facial Performance Capture Proceedings Article
In: Eurographics 2011, 2011.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{fyffe_comprehensive_2011,
title = {Comprehensive Facial Performance Capture},
author = {Graham Fyffe and Tim Hawkins and Chris Watts and Wan-Chun Ma and Paul Debevec},
url = {http://ict.usc.edu/pubs/Comprehensive%20Facial%20Performance%20Capture.pdf},
year = {2011},
date = {2011-04-01},
booktitle = {Eurographics 2011},
abstract = {We present a system for recording a live dynamic facial performance, capturing highly detailed geometry and spatially varying diffuse and specular reflectance information for each frame of the performance. The result is a reproduction of the performance that can be rendered from novel viewpoints and novel lighting conditions, achieving photorealistic integration into any virtual environment. Dynamic performances are captured directly, without the need for any template geometry or static geometry scans, and processing is completely automatic, requiring no human input or guidance. Our key contributions are a heuristic for estimating facial reflectance information from gradient illumination photographs, and a geometry optimization framework that maximizes a principled likelihood function combining multi-view stereo correspondence and photometric stereo, using multi-resolution belief propagation. The output of our system is a sequence of geometries and reflectance maps, suitable for rendering in off-the-shelf software. We show results from our system rendered under novel viewpoints and lighting conditions, and validate our results by demonstrating a close match to ground truth photographs.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
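A commonly used relation in gradient-illumination photometric stereo, sketched here as a hedged illustration of the general idea rather than the paper's full multi-view likelihood optimization: under spherical gradient patterns P_i(w) = (w_i + 1)/2, a Lambertian surface produces images whose ratio to the fully lit image encodes the surface normal.

import numpy as np

def normals_from_spherical_gradients(Ix, Iy, Iz, Ifull, eps=1e-6):
    """Approximate per-pixel diffuse normals from spherical gradient illumination.

    Ix, Iy, Iz : H x W images under the x, y, z gradient patterns (w_i + 1)/2
    Ifull      : H x W image under constant (full-on) spherical illumination

    For a Lambertian surface, 2 * I_i / I_full - 1 is proportional to the i-th
    normal component, so normalizing the stacked ratios yields the normal map.
    """
    n = np.stack([2.0 * Ix / (Ifull + eps) - 1.0,
                  2.0 * Iy / (Ifull + eps) - 1.0,
                  2.0 * Iz / (Ifull + eps) - 1.0], axis=-1)
    return n / np.maximum(np.linalg.norm(n, axis=-1, keepdims=True), eps)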
Liu, Ko-Yun; Ma, Wan-Chun; Chang, Chun-Fa; Wang, Chuan-Chang; Debevec, Paul
A framework for locally retargeting and rendering facial performance Proceedings Article
In: Computer Animation and Virtual Worlds, pp. 159–167, 2011.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{liu_framework_2011,
title = {A framework for locally retargeting and rendering facial performance},
author = {Ko-Yun Liu and Wan-Chun Ma and Chun-Fa Chang and Chuan-Chang Wang and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Framework%20for%20Locally%20Retargeting%20and%20Rendering%20Facial%20Performance.pdf},
year = {2011},
date = {2011-04-01},
booktitle = {Computer Animation and Virtual Worlds},
volume = {22},
pages = {159–167},
abstract = {We present a facial motion retargeting method that enables the control of a blendshape rig according to marker-based motion capture data. The main purpose of the proposed technique is to allow a blendshape rig to create facial expressions that conform best to the current motion capture input, regardless of the underlying blendshape poses. In other words, even though all of the blendshape poses may comprise symmetrical facial expressions only, our method is still able to create asymmetrical expressions without physically splitting any of them into more local blendshape poses. An automatic segmentation technique based on the analysis of facial motion is introduced to create facial regions for local retargeting. We also show that it is possible to blend normal maps for rendering in the same framework. Rendering with the blended normal map significantly improves surface appearance and details.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
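The rendering part of the framework above blends normal maps across facial regions. A minimal sketch of weighted normal blending with renormalization (the weight layout and names are assumptions, not the paper's exact scheme):

import numpy as np

def blend_normal_maps(normal_maps, weights, eps=1e-8):
    """Blend K normal maps with per-pixel (or per-region) weights.

    normal_maps : K x H x W x 3 array of unit normals
    weights     : K x H x W array of blend weights, e.g. per-region retargeting weights
    """
    blended = np.einsum('khw,khwc->hwc', weights, normal_maps)
    return blended / np.maximum(np.linalg.norm(blended, axis=-1, keepdims=True), eps)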
Stratou, Giota; Ghosh, Abhijeet; Debevec, Paul; Morency, Louis-Philippe
Effect of Illumination on Automatic Expression Recognition: A Novel 3D Relightable Facial Database Proceedings Article
In: Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition, Santa Barbara, CA, 2011.
Abstract | Links | BibTeX | Tags: Graphics, Virtual Humans
@inproceedings{stratou_effect_2011,
title = {Effect of Illumination on Automatic Expression Recognition: A Novel 3D Relightable Facial Database},
author = {Giota Stratou and Abhijeet Ghosh and Paul Debevec and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Effect%20of%20Illumination%20on%20Automatic%20Expression%20Recognition-%20A%20Novel%203D%20Relightable%20Facial%20Database.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition},
address = {Santa Barbara, CA},
abstract = {One of the main challenges in facial expression recognition is illumination invariance. Our long-term goal is to develop a system for automatic facial expression recognition that is robust to light variations. In this paper, we introduce a novel 3D Relightable Facial Expression (ICT-3DRFE) database that enables experimentation in the fields of both computer graphics and computer vision. The database contains 3D models for 23 subjects and 15 expressions, as well as photometric information that allows for photorealistic rendering. It is also annotated with facial action units, following FACS standards. Using the ICT-3DRFE database we create an image set of different expressions/illuminations to study the effect of illumination on automatic expression recognition. We compared the output scores from automatic recognition with expert FACS annotations and found that they agree when the illumination is uniform. Our results show that the output distribution of the automatic recognition can change significantly with light variations and can sometimes diminish the discrimination between two different expressions. We propose a ratio-based light transfer method to factor out unwanted illumination from given images and show that it reduces the effect of illumination on expression recognition.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
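The ratio-based light transfer proposed above factors illumination out of a photograph using two renderings of a relightable model of the same subject. A hedged sketch of the classic ratio-image idea (function and image names are assumptions):

import numpy as np

def transfer_lighting(photo, render_current, render_target, eps=1e-4):
    """Move a photograph from its current illumination toward a target illumination.

    photo          : H x W x 3 photograph under the (unwanted) current lighting
    render_current : H x W x 3 rendering of the subject's model under that same lighting
    render_target  : H x W x 3 rendering of the model under the desired lighting

    Multiplying by the per-pixel ratio of the two renderings approximately
    cancels the current illumination and imposes the target one.
    """
    return photo * (render_target / np.maximum(render_current, eps))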
Jurik, Joel; Jones, Andrew; Bolas, Mark; Debevec, Paul
Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array Proceedings Article
In: IEEE International Workshop on Projector–Camera Systems (PROCAMS), Colorado Springs, CO, 2011.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jurik_prototyping_2011,
title = {Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array},
author = {Joel Jurik and Andrew Jones and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Prototyping%20a%20Light%20Field%20Display%20Involving%20Direct%20Observation%20of%20a%20Video%20Projector%20Array.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {IEEE International Workshop on Projector–Camera Systems (PROCAMS)},
address = {Colorado Springs, CO},
abstract = {We present a concept for a full-parallax light field display achieved by having users look directly into an array of video projectors. Each projector acts as one angularly varying pixel, so the display's spatial resolution depends on the number of video projectors and the angular resolution depends on the pixel resolution of any one video projector. We prototype a horizontal-parallax-only arrangement by mechanically moving a single pico-projector to an array of positions, and use long-exposure photography to simulate video of a horizontal array. With this setup, we determine the minimal projector density required to produce a continuous image, and describe practical ways to achieve such density and to realize the resulting system. We finally show that if today's pico-projectors become sufficiently inexpensive, immersive full-parallax displays with arbitrarily high spatial and angular resolution will become possible.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Tchou, Chris; Gardner, Andrew; Hawkins, Tim; Poullis, Charis; Stumpfel, Jessi; Jones, Andrew; Yun, Nathaniel; Einarsson, Per; Lundgren, Therese; Fajardo, Marcos
Digitizing the Parthenon: Estimating Surface Reflectance under Measured Natural Illumination Book Section
In: Gallo, Giovanni (Ed.): Digital Imaging for Cultural Heritage Preservation: Analysis, Restoration, and Reconstruction of Ancient Artworks, pp. 159–182, CRC Press, 2011, ISBN: 978-1-4398-2173-2.
Abstract | Links | BibTeX | Tags: Graphics
@incollection{debevec_digitizing_2011,
title = {Digitizing the Parthenon: Estimating Surface Reflectance under Measured Natural Illumination},
author = {Paul Debevec and Chris Tchou and Andrew Gardner and Tim Hawkins and Charis Poullis and Jessi Stumpfel and Andrew Jones and Nathaniel Yun and Per Einarsson and Therese Lundgren and Marcos Fajardo},
editor = {Giovanni Gallo},
url = {http://ict.usc.edu/pubs/Digitizing%20the%20Parthenon-%20Estimating%20Surface%20Reflectance%20under%20Measured%20Natural%20Illumination.pdf},
isbn = {978-1-4398-2173-2},
year = {2011},
date = {2011-01-01},
booktitle = {Digital Imaging for Cultural Heritage Preservation: Analysis, Restoration, and Reconstruction of Ancient Artworks},
pages = {159–182},
publisher = {CRC Press},
abstract = {This edition presents the most prominent topics and applications of digital image processing, analysis, and computer graphics in the field of cultural heritage preservation. The text assumes prior knowledge of digital image processing and computer graphics fundamentals. Each chapter contains a table of contents, illustrations, and figures that elucidate the presented concepts in detail, as well as a chapter summary and a bibliography for further reading. Well-known experts cover a wide range of topics and related applications, including spectral imaging, automated restoration, computational reconstruction, digital reproduction, and 3D models.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {incollection}
}
2010
Swartout, William; Traum, David; Artstein, Ron; Noren, Dan; Debevec, Paul; Bronnenkant, Kerry; Williams, Josh; Leuski, Anton; Narayanan, Shrikanth; Piepol, Diane; Lane, H. Chad; Morie, Jacquelyn; Aggarwal, Priti; Liewer, Matt; Chiang, Jen-Yuan; Gerten, Jillian; Chu, Selina; White, Kyle
Virtual Museum Guides Demonstration Proceedings Article
In: IEEE Workshop on Spoken Language Technology, Berkeley, CA, 2010.
Links | BibTeX | Tags: Graphics, Learning Sciences, Virtual Humans
@inproceedings{swartout_virtual_2010,
title = {Virtual Museum Guides Demonstration},
author = {William Swartout and David Traum and Ron Artstein and Dan Noren and Paul Debevec and Kerry Bronnenkant and Josh Williams and Anton Leuski and Shrikanth Narayanan and Diane Piepol and H. Chad Lane and Jacquelyn Morie and Priti Aggarwal and Matt Liewer and Jen-Yuan Chiang and Jillian Gerten and Selina Chu and Kyle White},
url = {http://ict.usc.edu/pubs/Virtual%20Museum%20Guides%20Demonstration.pdf},
year = {2010},
date = {2010-12-01},
booktitle = {IEEE Workshop on Spoken Language Technology},
address = {Berkeley, CA},
keywords = {Graphics, Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Chen, Tongbo; Peers, Pieter; Wilson, Cyrus A.; Debevec, Paul
Circularly Polarized Spherical Illumination Reflectometry Proceedings Article
In: SIGGRAPH Asia, 2010.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{ghosh_circularly_2010,
title = {Circularly Polarized Spherical Illumination Reflectometry},
author = {Abhijeet Ghosh and Tongbo Chen and Pieter Peers and Cyrus A. Wilson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Circularly%20Polarized%20Spherical%20Illumination%20Reflectometry.pdf},
year = {2010},
date = {2010-12-01},
booktitle = {SIGGRAPH Asia},
abstract = {We present a novel method for surface reflectometry from a few observations of a scene under a single uniform spherical field of circularly polarized illumination. The method is based on a novel analysis of the Stokes reflectance field of circularly polarized spherical illumination and yields per-pixel estimates of diffuse albedo, specular albedo, index of refraction, and specular roughness of isotropic BRDFs. To infer these reflectance parameters, we measure the Stokes parameters of the reflected light at each pixel by taking four photographs of the scene, consisting of three photographs with differently oriented linear polarizers in front of the camera, and one additional photograph with a circular polarizer. The method only assumes knowledge of surface orientation, for which we make a few additional photometric measurements. We verify our method with three different lighting setups, ranging from specialized to off-the-shelf hardware, which project either discrete or continuous fields of spherical illumination. Our technique offers several benefits: it estimates a more detailed model of per-pixel surface reflectance parameters than previous work, it requires a relatively small number of measurements, it is applicable to a wide range of material types, and it is completely viewpoint independent.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
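The measurement step in the abstract above, recovering per-pixel Stokes parameters from three linear-polarizer photographs and one circular-polarizer photograph, follows standard polarimetry. The sketch below assumes the linear analyzer is oriented at 0, 45 and 90 degrees; the specific angles and sign conventions are assumptions, and the reflectance-parameter inference itself is not shown.

import numpy as np

def stokes_from_photos(I0, I45, I90, Icirc):
    """Per-pixel Stokes parameters from four analyzer photographs.

    I0, I45, I90 : images taken through a linear polarizer at 0, 45 and 90 degrees
    Icirc        : image taken through a circular polarizer

    A linear analyzer at angle t transmits (S0 + S1*cos 2t + S2*sin 2t) / 2 and a
    circular analyzer transmits (S0 + S3) / 2; the lines below invert those relations.
    """
    S0 = I0 + I90
    S1 = I0 - I90
    S2 = 2.0 * I45 - S0
    S3 = 2.0 * Icirc - S0
    return S0, S1, S2, S3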