Publications
Ghosh, Abhijeet; Heidrich, Wolfgang; Achutha, Shruthi; O'Toole, Matthew
A Basis Illumination Approach to BRDF Measurement Journal Article
In: International Journal of Computer Vision, vol. 90, no. 2, pp. 183–197, 2010.
@article{ghosh_basis_2010,
title = {A Basis Illumination Approach to BRDF Measurement},
author = {Abhijeet Ghosh and Wolfgang Heidrich and Shruthi Achutha and Matthew O'Toole},
url = {http://ict.usc.edu/pubs/A%20Basis%20Illumination%20Approach%20to%20BRDF%20Measurement.pdf},
doi = {10.1007/s11263-008-0151-7},
year = {2010},
date = {2010-01-01},
journal = {International Journal of Computer Vision},
volume = {90},
number = {2},
pages = {183–197},
abstract = {Realistic descriptions of surface reflectance have long been a topic of interest in both computer vision and computer graphics research. In this paper, we describe a novel high speed approach for the acquisition of bidirectional reflectance distribution functions (BRDFs). We develop a new theory for directly measuring BRDFs in a basis representation by projecting incident light as a sequence of basis functions from a spherical zone of directions. We derive an orthonormal basis over spherical zones that is ideally suited for this task. BRDF values outside the zonal directions are extrapolated by re-projecting the zonal measurements into a spherical harmonics basis, or by fitting analytical reflection models to the data. For specular materials, we experiment with alternative basis acquisition approaches such as compressive sensing with a random subset of the higher order orthonormal zonal basis functions, as well as measuring the response to a basis defined by an analytical model as a way of optically fitting the BRDF to such a representation. We verify this approach with a compact optical setup that requires no moving parts and only a small number of image measurements. Using this approach, a BRDF can be measured in just a few minutes.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
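The core idea above, measuring a BRDF's response to each member of an orthonormal basis of illumination patterns and then reconstructing values as a weighted sum of those basis functions, can be sketched in a few lines of numpy. This is an illustration only, not the paper's implementation: it stands in the first few real spherical harmonics for the derived zonal basis, and the quadrature weights are assumed given.

```python
import numpy as np

# Stand-in basis: the first four real spherical harmonics (l = 0, 1).
# The paper derives an orthonormal basis over a spherical *zone*; any
# orthonormal set over the measurement directions works the same way.
def basis(dirs):
    x, y, z = dirs[:, 0], dirs[:, 1], dirs[:, 2]
    c0 = 0.5 * np.sqrt(1.0 / np.pi)
    c1 = np.sqrt(3.0 / (4.0 * np.pi))
    return np.stack([np.full_like(x, c0), c1 * y, c1 * z, c1 * x], axis=1)

def project(values, dirs, weights):
    """Coefficients c_k ~= sum_i w_i * f(dir_i) * B_k(dir_i); with basis
    illumination, each c_k is measured directly as one camera response."""
    return basis(dirs).T @ (weights * values)

def reconstruct(coeffs, dirs):
    """BRDF slice f(dir) ~= sum_k c_k * B_k(dir)."""
    return basis(dirs) @ coeffs
```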
Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Combining Spherical Harmonics and Point-Source Illumination for Efficient Image-Based Relighting Proceedings Article
In: SIGGRAPH 2010, 2010.
@inproceedings{tunwattanapong_combining_2010,
title = {Combining Spherical Harmonics and Point-Source Illumination for Efficient Image-Based Relighting},
author = {Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Combining%20Spherical%20Harmonics%20and%20Point-Source%20Illumination%20for%20Efficient%20Image-Based%20Relighting.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {SIGGRAPH 2010},
abstract = {Traditional image-based relighting techniques require capturing a dense set of lighting directions surrounding the object and use the linearity of light transport together with the illumination data of the target environment to relight an object [Debevec et al. 2000]. However, this can be a very data intensive process because such datasets typically involve photographing hundreds of lighting directions. It is also difficult to modify or edit the data in post-production environments because the data is high dimensional; adjustments have to be made in several dimensions in order to add artistic effects to the result. Difficulty in the acquisition process is also one of the main problems: the capturing process typically lasts long enough to only be suitable for static objects. In this poster, we present a relighting technique which greatly reduces the number of images required for relighting and still generates realistic results. We combine spherical harmonics with point lights to achieve efficient image-based relighting. Spherical harmonics can efficiently capture smooth low-frequency illumination [Ramamoorthi and Hanrahan 2001] while point lights capture high-frequency directional illumination. Combining both techniques, we create relighting results which contain both low- and high-frequency illumination data. This technique also benefits the acquisition process by reducing the number of required photographs, which results in shorter capture time. In addition, fewer dimensions of the data can potentially simplify modification or editing of reflectance data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
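Because light transport is linear, the relighting described above reduces to a weighted sum of the captured response images: spherical-harmonic responses carry the smooth part of the target environment, point-light responses the sharp part. A minimal numpy sketch (array shapes and names are assumptions, not the poster's code):

```python
import numpy as np

def relight(sh_images, sh_env_coeffs, point_images, point_weights):
    """sh_images: (K, H, W, 3) responses to K spherical-harmonic patterns;
    sh_env_coeffs: (K,) SH coefficients of the target environment;
    point_images: (J, H, W, 3) responses to J point lights;
    point_weights: (J,) intensities of those directions in the environment.
    The relit image is the sum of the two linear combinations."""
    low = np.tensordot(sh_env_coeffs, sh_images, axes=(0, 0))
    high = np.tensordot(point_weights, point_images, axes=(0, 0))
    return low + high
```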
Fyffe, Graham; Wilson, Cyrus A.; Debevec, Paul
Cosine Lobe Based Relighting from Gradient Illumination Photographs Proceedings Article
In: Conference on Visual Media Production, London, UK, 2009.
@inproceedings{fyffe_cosine_2009,
title = {Cosine Lobe Based Relighting from Gradient Illumination Photographs},
author = {Graham Fyffe and Cyrus A. Wilson and Paul Debevec},
url = {http://www.ict.usc.edu/pubs/Cosine%20Lobe%20Based%20Relighting%20from%20Gradient%20Illumination%20Photographs.pdf},
year = {2009},
date = {2009-11-01},
booktitle = {Conference on Visual Media Production},
address = {London, UK},
abstract = {We present an image-based method for relighting a scene by analytically fitting a cosine lobe to the reflectance function at each pixel, based on gradient illumination photographs. Realistic relighting results for many materials are obtained using a single per-pixel cosine lobe obtained from just two color photographs: one under uniform white illumination and the other under colored gradient illumination. For materials with wavelength-dependent scattering, a better fit can be obtained using independent cosine lobes for the red, green, and blue channels, obtained from three monochromatic gradient illumination conditions instead of the colored gradient condition. We explore two cosine lobe reflectance functions, both of which allow an analytic fit to the gradient conditions. One is non-zero over half the sphere of lighting directions, which works well for diffuse and specular materials, but fails for materials with broader scattering such as fur. The other is non-zero everywhere, which works well for broadly scattering materials and still produces visually plausible results for diffuse and specular materials. Additionally, we estimate scene geometry from the photometric normals to produce hard shadows cast by the geometry, while still reconstructing the input photographs exactly.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
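The per-pixel fit above exploits a property of linear gradient illumination: dividing each gradient-lit image by the fully-lit image yields the centroid of the reflectance lobe along that axis, which fixes the cosine lobe's direction analytically. A minimal sketch of that step with assumed array shapes (the paper additionally recovers the lobe's amplitude and handles the colored-gradient case):

```python
import numpy as np

def lobe_axis(grad_x, grad_y, grad_z, full, eps=1e-8):
    """Each input is an (H, W) image. Under the gradient pattern
    L_i(w) = (w_i + 1) / 2, the lobe centroid along axis i is
    2 * I_i / I_full - 1; normalizing the centroid gives the lobe axis."""
    d = np.stack([2 * g / (full + eps) - 1 for g in (grad_x, grad_y, grad_z)],
                 axis=-1)
    return d / np.maximum(np.linalg.norm(d, axis=-1, keepdims=True), eps)
```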
Alexander, Oleg; Rogers, Mike; Lambeth, William; Chiang, Matt; Debevec, Paul
Creating a Photoreal Digital Actor: The Digital Emily Project Proceedings Article
In: IEEE European Conference on Visual Media Production (CVMP), London, UK, 2009.
@inproceedings{alexander_creating_2009,
title = {Creating a Photoreal Digital Actor: The Digital Emily Project},
author = {Oleg Alexander and Mike Rogers and William Lambeth and Matt Chiang and Paul Debevec},
url = {https://dl.acm.org/doi/pdf/10.1145/1667239.1667251},
year = {2009},
date = {2009-11-01},
booktitle = {IEEE European Conference on Visual Media Production (CVMP)},
number = {ICT TR 04 2009},
address = {London, UK},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {The Digital Emily Project is a collaboration between facial animation company Image Metrics and the Graphics Laboratory at the University of Southern California's Institute for Creative Technologies to achieve one of the world's first photorealistic digital facial performances. The project leverages latest-generation techniques in high-resolution face scanning, character rigging, video-based facial animation, and compositing. An actress was first filmed on a studio set speaking emotive lines of dialog in high definition. The lighting on the set was captured as a high dynamic range light probe image. The actress' face was then three-dimensionally scanned in thirty-three facial expressions showing different emotions and mouth and eye movements using a high-resolution facial scanning process accurate to the level of skin pores and fine wrinkles. Lighting-independent diffuse and specular reflectance maps were also acquired as part of the scanning process. Correspondences between the 3D expression scans were formed using a semi-automatic process, allowing a blendshape facial animation rig to be constructed whose expressions closely mirrored the shapes observed in the rich set of facial scans; animated eyes and teeth were also added to the model. Skin texture detail showing dynamic wrinkling was converted into multiresolution displacement maps also driven by the blend shapes. A semi-automatic video-based facial animation system was then used to animate the 3D face rig to match the performance seen in the original video, and this performance was tracked onto the facial motion in the studio video. The final face was illuminated by the captured studio illumination and shaded using the acquired reflectance maps with a skin translucency shading algorithm. Using this process, the project was able to render a synthetic facial performance which was generally accepted as being a real face.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
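One step in the pipeline above, the blendshape rig, has a standard linear form: every expression is the neutral mesh plus a weighted sum of per-expression vertex offsets. A generic sketch of that evaluation (not Image Metrics' rig):

```python
import numpy as np

def blendshape(neutral, deltas, weights):
    """neutral: (V, 3) rest vertices; deltas: (K, V, 3) offsets of the K
    scanned expressions from neutral; weights: (K,) animation values.
    Returns the deformed (V, 3) mesh."""
    return neutral + np.tensordot(weights, deltas, axes=(0, 0))
```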
Jones, Andrew; Lang, Magnus; Fyffe, Graham; Yu, Xueming; Busch, Jay; McDowall, Ian; Bolas, Mark; Debevec, Paul
Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System Journal Article
In: ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009, vol. 28, no. 3, 2009.
@article{jones_achieving_2009,
title = {Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System},
author = {Andrew Jones and Magnus Lang and Graham Fyffe and Xueming Yu and Jay Busch and Ian McDowall and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Achieving%20Eye%20Contact%20in%20a%20One-to-Many%203D%20Video%20Teleconferencing%20System.pdf},
year = {2009},
date = {2009-08-01},
journal = {ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009},
volume = {28},
number = {3},
abstract = {We present a set of algorithms and an associated display system capable of producing correctly rendered eye contact between a three-dimensionally transmitted remote participant and a group of observers in a 3D teleconferencing system. The participant's face is scanned in 3D at 30Hz and transmitted in real time to an autostereoscopic horizontal-parallax 3D display, displaying him or her over more than a 180° field of view observable to multiple observers. To render the geometry with correct perspective, we create a fast vertex shader based on a 6D lookup table for projecting 3D scene vertices to a range of subject angles, heights, and distances. We generalize the projection mathematics to arbitrarily shaped display surfaces, which allows us to employ a curved concave display surface to focus the high speed imagery to individual observers. To achieve two-way eye contact, we capture 2D video from a cross-polarized camera reflected to the position of the virtual participant's eyes, and display this 2D video feed on a large screen in front of the real participant, replicating the viewpoint of their virtual self. To achieve correct vertical perspective, we further leverage this image to track the position of each audience member's eyes, allowing the 3D display to render correct vertical perspective for each of the viewers around the device. The result is a one-to-many 3D teleconferencing system able to reproduce the effects of gaze, attention, and eye contact generally missing in traditional teleconferencing systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wilson, Cyrus A.; Ghosh, Abhijeet; Peers, Pieter; Chiang, Jen-Yuan; Busch, Jay; Debevec, Paul
2D and 3D facial correspondences via photometric alignment Proceedings Article
In: SIGGRAPH, New Orleans, LA, 2009, ISBN: 978-1-60558-834-6.
@inproceedings{wilson_2d_2009,
title = {2D and 3D facial correspondences via photometric alignment},
author = {Cyrus A. Wilson and Abhijeet Ghosh and Pieter Peers and Jen-Yuan Chiang and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/2D%20and%203D%20facial%20correspondences%20via%20photometric%20alignment.pdf},
doi = {10.1145/1597990.1598018},
isbn = {978-1-60558-834-6},
year = {2009},
date = {2009-08-01},
booktitle = {SIGGRAPH},
address = {New Orleans, LA},
abstract = {Capturing facial geometry that is high-resolution, yet easy to animate, remains a difficult challenge. While a single scanned geometry may be straightforward to animate smoothly, it may not always yield realistic fine scale detail when deformed into different facial expressions. Combining scans of multiple facial expressions, however, is only practical if geometrical correspondences between the different scanned expressions are available. Correspondences obtained based on locations of facial landmarks or of placed markers are often sparse, especially compared to fine-scale structures such as individual skin pores. The resulting misalignment of fine detail can introduce artifacts or blur out details we wish to preserve.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Chen, Tongbo; Peers, Pieter; Wilson, Cyrus A.; Debevec, Paul
Estimating Specular Roughness and Anisotropy from Second Order Spherical Gradient Illumination Proceedings Article
In: Computer Graphics Forum, vol. 28, no. 4, 2009.
@inproceedings{ghosh_estimating_2009,
title = {Estimating Specular Roughness and Anisotropy from Second Order Spherical Gradient Illumination},
author = {Abhijeet Ghosh and Tongbo Chen and Pieter Peers and Cyrus A. Wilson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Estimating%20Specular%20Roughness%20and%20Anisotropy%20from%20Second%20Order%20Spherical%20Gradient%20Illumination.pdf},
year = {2009},
date = {2009-06-01},
booktitle = {Computer Graphics Forum},
volume = {28},
number = {4},
abstract = {This paper presents a novel method for estimating specular roughness and tangent vectors, per surface point, from polarized second order spherical gradient illumination patterns. We demonstrate that for isotropic BRDFs, only three second order spherical gradients are sufficient to robustly estimate spatially varying specular roughness. For anisotropic BRDFs, an additional two measurements yield specular roughness and tangent vectors per surface point. We verify our approach with different illumination configurations which project both discrete and continuous fields of gradient illumination. Our technique provides a direct estimate of the per-pixel specular roughness and thus does not require off-line numerical optimization that is typical for the measure-and-fit approach to classical BRDF modeling.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
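The measurement principle above can be read statistically: constant, linear, and quadratic illumination gradients record the 0th, 1st, and 2nd moments of the specular lobe, so its mean (reflection direction) and spread (roughness) follow from three per-pixel ratios. A schematic one-axis sketch, not the paper's exact estimator (which also uses polarization and handles anisotropy):

```python
import numpy as np

def lobe_stats(i0, i1, i2, eps=1e-8):
    """i0, i1, i2: (H, W) responses to constant, linear, and quadratic
    gradient patterns along one axis (after remapping the displayed
    patterns from [0, 1] back to signed values). mean = i1/i0 is the lobe
    center; var = i2/i0 - mean**2 grows with specular roughness."""
    mean = i1 / (i0 + eps)
    var = i2 / (i0 + eps) - mean ** 2
    return mean, np.sqrt(np.maximum(var, 0.0))
```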
Tsiartas, Andreas; Ghosh, Prasanta Kumar; Georgiou, Panayiotis G.; Narayanan, Shrikanth
Robust Word Boundary Detection in Spontaneous Speech using Acoustic and Lexical Clues Proceedings Article
In: Proceedings of ICASSP, Taipei, Taiwan, 2009.
@inproceedings{tsiartas_robust_2009,
title = {Robust Word Boundary Detection in Spontaneous Speech using Acoustic and Lexical Clues},
author = {Andreas Tsiartas and Prasanta Kumar Ghosh and Panayiotis G. Georgiou and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Robust%20Word%20Boundary%20Detection%20in%20Spontaneous%20Speech%20using%20Acoustic%20and%20Lexical%20Clues.pdf},
year = {2009},
date = {2009-04-01},
booktitle = {Proceedings of ICASSP},
address = {Taipei, Taiwan},
abstract = {We consider the problem of word boundary detection in spontaneous speech utterances. Acoustic features have been well explored in the literature in the context of word boundary detection; however, in the spontaneous speech of the Switchboard-I corpus, we found that the accuracy of word boundary detection using acoustic features is poor (F-score ∼ 0.63). We propose a new feature that captures lexical cues in the context of the word boundary detection problem. We show that by including the proposed lexical feature along with the usual acoustic features, the accuracy of word boundary detection improves considerably (F-score ∼ 0.81). We also demonstrate the robustness of our proposed feature in the presence of different noise levels for additive white and pink noise.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
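For reference, the F-scores quoted above (∼0.63 acoustic-only versus ∼0.81 with the lexical feature) are the usual harmonic mean of precision and recall:

```python
def f1_score(tp, fp, fn):
    """F1 from true-positive, false-positive, and false-negative counts
    (counts assumed nonzero)."""
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)
```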
Lamond, Bruce; Peers, Pieter; Ghosh, Abhijeet; Debevec, Paul
Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination, Supplemental Material Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2009, 2009.
@techreport{lamond_image-based_2009,
title = {Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination, Supplemental Material},
author = {Bruce Lamond and Pieter Peers and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2009.pdf},
year = {2009},
date = {2009-01-01},
number = {ICT TR 01 2009},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present an image-based method for separating diffuse and specular reflections using environmental structured illumination. Two types of structured illumination are discussed: phase-shifted sine wave patterns, and phase-shifted binary stripe patterns. In both cases the low-pass filtering nature of diffuse reflections is utilized to separate the reflection components. We illustrate our method on a wide range of example scenes and applications.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
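The separation above rests on the low-pass argument: a sharp specular reflection preserves the projected sine pattern's AC amplitude, while diffuse reflection blurs it away, leaving only a DC offset. A three-phase demodulation sketch (illustrative only; scale factors depend on pattern contrast, and the specular DC is assumed equal to its AC amplitude):

```python
import numpy as np

PHASES = np.array([0.0, 2 * np.pi / 3, 4 * np.pi / 3])

def separate(images):
    """images: (3, H, W) photos under a sine pattern shifted by PHASES.
    Per pixel, demodulate I_k = A + B * cos(phase + PHASES[k]): the AC
    amplitude B survives only along specular paths."""
    dc = images.mean(axis=0)
    a = (2.0 / 3.0) * np.tensordot(np.cos(PHASES), images, axes=(0, 0))
    b = (2.0 / 3.0) * np.tensordot(np.sin(PHASES), images, axes=(0, 0))
    specular = np.hypot(a, b)                # AC amplitude: specular only
    diffuse = np.maximum(dc - specular, 0.0) # remaining DC: diffuse
    return diffuse, specular
```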
Peers, Pieter; Mahajan, Dhruv K.; Lamond, Bruce; Ghosh, Abhijeet; Matusik, Wojciech; Ramamoorthi, Ravi; Debevec, Paul
Compressive Light Transport Sensing Journal Article
In: ACM Transactions on Graphics, vol. 28, no. 1, 2009.
@article{peers_compressive_2009,
title = {Compressive Light Transport Sensing},
author = {Pieter Peers and Dhruv K. Mahajan and Bruce Lamond and Abhijeet Ghosh and Wojciech Matusik and Ravi Ramamoorthi and Paul Debevec},
url = {http://ict.usc.edu/pubs/Compressive%20Light%20Transport%20Sensing.pdf},
year = {2009},
date = {2009-01-01},
journal = {ACM Transactions on Graphics},
volume = {28},
number = {1},
abstract = {In this paper we propose a new framework for capturing light transport data of a real scene, based on the recently developed theory of compressive sensing. Compressive sensing offers a solid mathematical framework to infer a sparse signal from a limited number of non-adaptive measurements. Besides introducing compressive sensing for fast acquisition of light transport to computer graphics, we develop several innovations that address specific challenges for image-based relighting, and which may have broader implications. We develop a novel hierarchical decoding algorithm that improves reconstruction quality by exploiting inter-pixel coherency relations. Additionally, we design new non-adaptive illumination patterns that minimize measurement noise and further improve reconstruction quality. We illustrate our framework by capturing detailed high-resolution reflectance fields for image-based relighting.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
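The reconstruction problem in the abstract, inferring a signal that is sparse in some basis from far fewer non-adaptive measurements than unknowns, is classically solved by greedy pursuit. A generic orthogonal matching pursuit sketch (the paper's actual decoder is hierarchical and exploits inter-pixel coherence, which this does not attempt):

```python
import numpy as np

def omp(Phi, y, sparsity):
    """Recover an approximately `sparsity`-sparse x from y = Phi @ x,
    with Phi: (m, n), m << n. Greedily pick the atom most correlated
    with the residual, then re-fit all picked atoms by least squares."""
    residual, support = y.copy(), []
    for _ in range(sparsity):
        support.append(int(np.argmax(np.abs(Phi.T @ residual))))
        coef, *_ = np.linalg.lstsq(Phi[:, support], y, rcond=None)
        residual = y - Phi[:, support] @ coef
    x = np.zeros(Phi.shape[1])
    x[support] = coef
    return x
```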
Vlasic, Daniel; Peers, Pieter; Baran, Ilya; Debevec, Paul; Popovic, Jovan; Rusinkiewicz, Szymon; Matusik, Wojciech
Dynamic Shape Capture using Multi-View Photometric Stereo Journal Article
In: ACM Transactions on Graphics, vol. 28, no. 5, 2009.
@article{vlasic_dynamic_2009,
title = {Dynamic Shape Capture using Multi-View Photometric Stereo},
author = {Daniel Vlasic and Pieter Peers and Ilya Baran and Paul Debevec and Jovan Popovic and Szymon Rusinkiewicz and Wojciech Matusik},
url = {http://ict.usc.edu/pubs/Dynamic%20Shape%20Capture%20using%20Multi-View%20Photometric%20Stereo.pdf},
year = {2009},
date = {2009-01-01},
journal = {ACM Transactions on Graphics},
volume = {28},
number = {5},
abstract = {We describe a system for high-resolution capture of moving 3D geometry, beginning with dynamic normal maps from multiple views. The normal maps are captured using active shape-from-shading (photometric stereo), with a large lighting dome providing a series of novel hemispherical lighting configurations. To compensate for low-frequency deformation, we perform multi-view matching and thin-plate spline deformation on the initial surfaces obtained by integrating the normal maps. Next, the corrected meshes are merged into a single mesh using a volumetric method. The final output is a set of meshes, which were impossible to produce with previous methods. The meshes exhibit details on the order of a few millimeters, and represent the performance over human-size working volumes at a temporal resolution of 60Hz.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
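The per-frame normal maps above come from photometric stereo, which in its basic Lambertian form is a per-pixel linear solve: stacking the images gives I = L @ G with G = albedo * normal. A textbook sketch (the paper's hemispherical lighting configurations and multi-view correction are beyond this):

```python
import numpy as np

def photometric_normals(L, I, eps=1e-8):
    """L: (k, 3) lighting directions; I: (k, P) intensities for P pixels.
    Solves I = L @ G in the least-squares sense; |G| is albedo and
    G / |G| the surface normal."""
    G, *_ = np.linalg.lstsq(L, I, rcond=None)   # (3, P)
    albedo = np.linalg.norm(G, axis=0)
    return G / np.maximum(albedo, eps), albedo
```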
Yamada, Hideshi; Peers, Pieter; Debevec, Paul
Compact Representation of Reflectance Fields using Clustered Sparse Residual Factorization Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2009, 2009.
@techreport{yamada_compact_2009,
title = {Compact Representation of Reflectance Fields using Clustered Sparse Residual Factorization},
author = {Hideshi Yamada and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-02-2009.pdf},
year = {2009},
date = {2009-01-01},
number = {ICT TR 02 2009},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a novel compression method for fixed viewpoint reflectance fields, captured for example by a Light Stage. Our compressed representation consists of a global approximation that exploits the similarities between the reflectance functions of different pixels, and a local approximation that encodes the per-pixel residual with the global approximation. Key to our method is a clustered sparse residual factorization. This sparse residual factorization ensures that the per-pixel residual matrix is as sparse as possible, enabling a compact local approximation. Finally, we demonstrate that the presented compact representation is well suited for high-quality real-time rendering.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
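The global-plus-sparse-residual structure described above can be illustrated with a truncated SVD for the shared part and per-pixel hard thresholding for the residual. The paper's clustered factorization is more elaborate, so treat this only as a shape-of-the-idea sketch:

```python
import numpy as np

def compress(X, rank, nnz):
    """X: (P, L) matrix of per-pixel reflectance functions. Global part:
    a rank-`rank` SVD shared by all pixels; local part: the `nnz`
    largest-magnitude residual entries per pixel."""
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    approx = (U[:, :rank] * s[:rank]) @ Vt[:rank]
    residual = X - approx
    idx = np.argsort(np.abs(residual), axis=1)[:, -nnz:]
    sparse = np.zeros_like(residual)
    rows = np.arange(X.shape[0])[:, None]
    sparse[rows, idx] = residual[rows, idx]
    return approx + sparse
```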
Lamond, Bruce; Peers, Pieter; Ghosh, Abhijeet; Debevec, Paul
Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination Proceedings Article
In: IEEE International Conference on Computational Photography, 2009.
@inproceedings{lamond_image-based_2009-1,
title = {Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination},
author = {Bruce Lamond and Pieter Peers and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Image-based%20Separation%20of%20Diffuse%20and%20Specular%20Reflections%20using%20Environmental%20Structured%20Illumination.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {IEEE International Conference on Computational Photography},
abstract = {We present an image-based method for separating diffuse and specular reflections using environmental structured illumination. Two types of structured illumination are discussed: phase-shifted sine wave patterns, and phase-shifted binary stripe patterns. In both cases the low-pass filtering nature of diffuse reflections is utilized to separate the reflection components. We illustrate our method on a wide range of example scenes and applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Hawkins, Tim; Peers, Pieter; Frederiksen, Sune; Debevec, Paul
Practical Modeling and Acquisition of Layered Facial Reflectance Journal Article
In: ACM Transactions on Graphics, vol. 27, no. 5, 2008.
@article{ghosh_practical_2008,
title = {Practical Modeling and Acquisition of Layered Facial Reflectance},
author = {Abhijeet Ghosh and Tim Hawkins and Pieter Peers and Sune Frederiksen and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Modeling%20and%20Acquisition%20of%20Layered%20Facial%20Reflectance.pdf},
year = {2008},
date = {2008-12-01},
journal = {ACM Transactions on Graphics},
volume = {27},
number = {5},
abstract = {We present a practical method for modeling layered facial reflectance consisting of specular reflectance, single scattering, and shallow and deep subsurface scattering. We estimate parameters of appropriate reflectance models for each of these layers from just 20 photographs recorded in a few seconds from a single viewpoint. We extract spatially-varying specular reflectance and single-scattering parameters from polarization-difference images under spherical and point source illumination. Next, we employ direct-indirect separation to decompose the remaining multiple scattering observed under cross-polarization into shallow and deep scattering components to model the light transport through multiple layers of skin. Finally, we match appropriate diffusion models to the extracted shallow and deep scattering components for different regions on the face. We validate our technique by comparing renderings of subjects to reference photographs recorded from novel viewpoints and under novel illumination conditions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
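The first separation step above uses polarization: a single specular bounce preserves the source's polarization, while subsurface-scattered light emerges depolarized. With a polarizer on the light and an analyzer on the camera, the standard decomposition is:

```python
import numpy as np

def polarization_difference(i_parallel, i_cross):
    """i_parallel / i_cross: images with the camera's analyzer parallel /
    crossed relative to the source polarization. Depolarized (diffuse)
    light splits evenly between the two states; polarized specular light
    appears only in the parallel image."""
    specular = np.maximum(i_parallel - i_cross, 0.0)
    diffuse = 2.0 * i_cross
    return diffuse, specular
```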
Ma, Wan-Chun; Jones, Andrew; Hawkins, Tim; Chiang, Jen-Yuan; Debevec, Paul
A high-resolution geometry capture system for facial performance Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2008.
@inproceedings{ma_high-resolution_2008,
title = {A high-resolution geometry capture system for facial performance},
author = {Wan-Chun Ma and Andrew Jones and Tim Hawkins and Jen-Yuan Chiang and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20high-resolution%20geometry%20capture%20system%20for%20facial%20performance.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {SIGGRAPH},
address = {Los Angeles, CA},
abstract = {Results: The two cameras capture data at a resolution of 2400×1800 (Bayer pattern). With an internal RAM storage of 12 GB, the maximum recording time is around 5 seconds. The result of each scan contains a high-resolution mesh that usually consists of 1M triangles, a smoothed medium-resolution mesh, a color texture, a world-space normal map, and a displacement map that represents the difference between the high-resolution mesh and the smoothed mesh.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Peers, Pieter; Mahajan, Dhruv K.; Lamond, Bruce; Ghosh, Abhijeet; Matusik, Wojciech; Ramamoorthi, Ravi; Debevec, Paul
Compressive Light Transport Sensing Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 05 2008, 2008.
@techreport{peers_compressive_2008,
title = {Compressive Light Transport Sensing},
author = {Pieter Peers and Dhruv K. Mahajan and Bruce Lamond and Abhijeet Ghosh and Wojciech Matusik and Ravi Ramamoorthi and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT%20TR%2005%202008.pdf},
year = {2008},
date = {2008-01-01},
number = {ICT TR 05 2008},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {In this paper we propose a new framework for capturing light transport data of a real scene, based on the recently developed theory of compressive sensing. Compressive sensing offers a solid mathematical framework to infer a sparse signal from a limited number of non-adaptive measurements. Besides introducing compressive sensing for fast acquisition of light transport to computer graphics, we develop several innovations that address specific challenges for image-based relighting, and which may have broader implications. We develop a novel hierarchical decoding algorithm that improves reconstruction quality by exploiting inter-pixel coherency relations. Additionally, we design new non-adaptive illumination patterns that minimize measurement noise and further improve reconstruction quality. We illustrate our framework by capturing detailed high-resolution reflectance fields for image-based relighting.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ma, Wan-Chun; Jones, Andrew; Chiang, Jen-Yuan; Hawkins, Tim; Frederiksen, Sune; Peers, Pieter; Vukovic, Marko; Ouhyoung, Ming; Debevec, Paul
Facial Performance Synthesis using Deformation-Driven Polynomial Displacement Maps Journal Article
In: ACM Transactions on Graphics, vol. 27, no. 5, 2008.
@article{ma_facial_2008,
title = {Facial Performance Synthesis using Deformation-Driven Polynomial Displacement Maps},
author = {Wan-Chun Ma and Andrew Jones and Jen-Yuan Chiang and Tim Hawkins and Sune Frederiksen and Pieter Peers and Marko Vukovic and Ming Ouhyoung and Paul Debevec},
url = {http://ict.usc.edu/pubs/Facial%20Performance%20Synthesis%20using%20Deformation-Driven%20Polynomial%20Displacement%20Maps.pdf},
year = {2008},
date = {2008-01-01},
journal = {ACM Transactions on Graphics},
volume = {27},
number = {5},
abstract = {We present a novel method for acquisition, modeling, compression, and synthesis of realistic facial deformations using polynomial displacement maps. Our method consists of an analysis phase where the relationship between motion capture markers and detailed facial geometry is inferred, and a synthesis phase where novel detailed animated facial geometry is driven solely by a sparse set of motion capture markers. For analysis, we record the actor wearing facial markers while performing a set of training expression clips. We capture real-time high-resolution facial deformations, including dynamic wrinkle and pore detail, using interleaved structured light 3D scanning and photometric stereo. Next, we compute displacements between a neutral mesh driven by the motion capture markers and the high-resolution captured expressions. These geometric displacements are stored in a polynomial displacement map which is parameterized according to the local deformations of the motion capture dots. For synthesis, we drive the polynomial displacement map with new motion capture data. This allows the recreation of large-scale muscle deformation, medium and fine wrinkles, and dynamic skin pore detail. Applications include the compression of existing performance data and the synthesis of new performances. Our technique is independent of the underlying geometry capture system and can be used to automatically generate high-frequency wrinkle and pore details on top of many existing facial animation systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
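A polynomial displacement map is, per texel, a low-degree polynomial from a local deformation measure to a displacement, fit by least squares over the training expressions. A quadratic single-texel sketch with assumed variable names (the paper drives the polynomial from motion-capture-derived deformations and stores coefficient maps over the whole face):

```python
import numpy as np

def fit_pdm(strain, disp):
    """strain: (n, 2) local deformation values (s, t) over n training
    expressions; disp: (n,) observed displacements at one texel.
    Fits displacement ~ quadratic polynomial in (s, t)."""
    s, t = strain[:, 0], strain[:, 1]
    A = np.stack([np.ones_like(s), s, t, s * s, s * t, t * t], axis=1)
    coeffs, *_ = np.linalg.lstsq(A, disp, rcond=None)
    return coeffs

def eval_pdm(c, s, t):
    """Evaluate the fitted quadratic at a new deformation (s, t)."""
    return (c[0] + c[1] * s + c[2] * t
            + c[3] * s * s + c[4] * s * t + c[5] * t * t)
```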
Jones, Andrew; Chiang, Jen-Yuan; Ghosh, Abhijeet; Lang, Magnus; Hullin, Matthias; Busch, Jay; Debevec, Paul
Real-time Geometry and Reflectance Capture for Digital Face Replacement Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2008, 2008.
@techreport{jones_real-time_2008,
title = {Real-time Geometry and Reflectance Capture for Digital Face Replacement},
author = {Andrew Jones and Jen-Yuan Chiang and Abhijeet Ghosh and Magnus Lang and Matthias Hullin and Jay Busch and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-04-2008.pdf},
year = {2008},
date = {2008-01-01},
number = {ICT TR 04 2008},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
An Interactive 360° Light Field Display Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{jones_interactive_2007,
title = {An Interactive 360° Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
abstract = {While a great deal of computer generated imagery is modeled and rendered in 3D, the vast majority of this 3D imagery is shown on 2D displays. Various forms of 3D displays have been contemplated and constructed for at least one hundred years [Lippmann 1908], but only recent evolutions in digital capture, computation, and display have made functional and practical 3D displays possible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Chabert, Charles-Felix; Bolas, Mark; Peers, Pieter; Debevec, Paul
A system for high-resolution face scanning based on polarized spherical illumination Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{ma_system_2007,
title = {A system for high-resolution face scanning based on polarized spherical illumination},
author = {Wan-Chun Ma and Tim Hawkins and Charles-Felix Chabert and Mark Bolas and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20system%20for%20high-resolution%20face%20scanning%20based%20on%20polarized%20spherical%20illumination.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}