Publications
Phan, Thai; Krum, David M.; Bolas, Mark
ShodanVR: Immersive Visualization of Text Records from the Shodan Database Inproceedings
In: Proceedings of the 2016 Workshop on Immersive Analytics (IA), IEEE, Greenville, SC, 2016, ISBN: 978-1-5090-0834-6.
@inproceedings{phan_shodanvr_2016,
title = {ShodanVR: Immersive Visualization of Text Records from the Shodan Database},
author = {Thai Phan and David M. Krum and Mark Bolas},
url = {http://ieeexplore.ieee.org/document/7932379/?part=1},
doi = {10.1109/IMMERSIVE.2016.7932379},
isbn = {978-1-5090-0834-6},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of the 2016 Workshop on Immersive Analytics (IA)},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {ShodanVR is an immersive visualization for querying and displaying text records from the Shodan database of Internet connected devices. Shodan provides port connection data retrieved from servers, routers, and other networked devices [2]. Cybersecurity professionals can glean this data for device populations, software versions, and potential security vulnerabilities [1].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
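For readers who want to reproduce the kind of records ShodanVR renders, the sketch below pulls banner text from Shodan using the official Python client. The query string and API key are placeholders; this is an illustration, not the paper's code.

    # Hypothetical sketch: fetching the text records ShodanVR visualizes,
    # via the official "shodan" Python client (pip install shodan).
    import shodan

    API_KEY = "YOUR_SHODAN_API_KEY"        # assumption: a valid Shodan API key
    api = shodan.Shodan(API_KEY)

    # Banner records for a device population of interest (placeholder query).
    results = api.search("product:nginx country:US")
    print("Total matching devices:", results["total"])

    for record in results["matches"][:5]:
        # Each match carries the raw banner text plus metadata such as IP and port.
        print(record["ip_str"], record["port"])
        print(record["data"][:200])        # truncated banner text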
Nilsson, Niels; Suma, Evan; Nordahl, Rolf; Bolas, Mark; Serafin, Stefania
Estimation of Detection Thresholds for Audiovisual Rotation Gains Inproceedings
In: IEEE Virtual Reality 2016, pp. ID: A22, IEEE, Greenville, SC, 2016.
@inproceedings{nilsson_estimation_2016,
title = {Estimation of Detection Thresholds for Audiovisual Rotation Gains},
author = {Niels Nilsson and Evan Suma and Rolf Nordahl and Mark Bolas and Stefania Serafin},
url = {http://ieeevr.org/2016/posters/},
year = {2016},
date = {2016-03-01},
booktitle = {IEEE Virtual Reality 2016},
pages = {ID: A22},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of visual stimuli. We describe a within-subjects study (n=31) exploring if participants’ ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
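The manipulation under study is a rotation gain: the virtual scene turns by a scaled copy of the user's physical head rotation. A minimal sketch of that scaling, with parameter names of our own choosing rather than the authors':

    # Minimal illustration (not the study's code): scale each physical head-yaw
    # change by a gain before applying it to the virtual camera.
    def apply_rotation_gain(prev_real_yaw, real_yaw, virtual_yaw, gain):
        """Angles in degrees. gain > 1 amplifies physical rotation, gain < 1 dampens it.
        Detection-threshold experiments estimate the gains at which users first
        notice this mismatch between real and virtual rotation."""
        return virtual_yaw + gain * (real_yaw - prev_real_yaw)

    # Example: a 10 degree physical turn rendered as a 12 degree virtual turn.
    print(apply_rotation_gain(0.0, 10.0, 0.0, gain=1.2))   # 12.0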
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Head Mounted Projection for Enhanced Gaze in Social Interactions Inproceedings
In: 2016 IEEE Virtual Reality (VR), pp. 209–210, IEEE, Greenville, SC, 2016.
@inproceedings{krum_head_2016,
title = {Head Mounted Projection for Enhanced Gaze in Social Interactions},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504727},
doi = {10.1109/VR.2016.7504727},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {209--210},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hutton, Courtney; Suma, Evan
A Realistic Walking Model for Enhancing Redirection in Virtual Reality Inproceedings
In: 2016 IEEE Virtual Reality (VR), pp. 183–184, IEEE, Greenville, SC, 2016.
@inproceedings{hutton_realistic_2016,
title = {A Realistic Walking Model for Enhancing Redirection in Virtual Reality},
author = {Courtney Hutton and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504714},
doi = {10.1109/VR.2016.7504714},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {183--184},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
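As a rough illustration of what a more realistic walking model means in practice, the sketch below replaces straight waypoint-to-waypoint segments with a walker that turns toward the next waypoint at a bounded rate, producing curved simulated paths. The speed, turn limit, and time step are assumed values, not those used in the poster.

    import math

    def step_walker(x, y, heading, waypoint, speed=1.4,
                    max_turn_rate=math.radians(45), dt=0.05):
        """Advance a simulated walker one time step toward waypoint = (wx, wy).
        Bounding the turn rate yields curved, human-like approach paths."""
        desired = math.atan2(waypoint[1] - y, waypoint[0] - x)
        error = (desired - heading + math.pi) % (2 * math.pi) - math.pi   # wrap to [-pi, pi]
        heading += max(-max_turn_rate * dt, min(max_turn_rate * dt, error))
        x += speed * dt * math.cos(heading)
        y += speed * dt * math.sin(heading)
        return x, y, heading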
McAlinden, Ryan; Suma, Evan; Grechkin, Timofey; Enloe, Michael
Procedural Reconstruction of Simulation Terrain Using Drones Inproceedings
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{mcalinden_procedural_2015,
title = {Procedural Reconstruction of Simulation Terrain Using Drones},
author = {Ryan McAlinden and Evan Suma and Timofey Grechkin and Michael Enloe},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Photogrammetric techniques for constructing 3D virtual environments have previously been plagued by expensive equipment, imprecise and visually unappealing results. However, with the introduction of low-cost, off-the-shelf (OTS) unmanned aerial systems (UAS), lighter and capable cameras, and more efficient software techniques for reconstruction, the modeling and simulation (M&S) community now has available to it new types of virtual assets that are suited for modern-day games and simulations. This paper presents an approach for fully autonomously collecting, processing, storing and rendering highly-detailed geo-specific terrain data using these OTS techniques and methods. We detail the types of equipment used, the flight parameters, the processing and reconstruction pipeline, and finally the results of using the dataset in a game/simulation engine. A key objective of the research is procedurally segmenting the terrain into usable features that the engine can interpret – i.e. distinguishing between roads, buildings, vegetation, etc. This allows the simulation core to assign attributes related to physics, lighting, collision cylinders and navigation meshes that not only support basic rendering of the model but introduce interaction with it. The results of this research are framed in the context of a new paradigm for geospatial collection, analysis and simulation. Specifically, the next generation of M&S systems will need to integrate environmental representations that have higher detail and richer metadata while ensuring a balance between performance and usability.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Phan, Thai; Bolas, Mark
"Hi, It's Me Again!": Virtual Coaches over Mobile Video Inproceedings
In: Proceedings of the 3rd International Conference on Human-Agent Interaction, pp. 183–186, ACM, Daegu, Korea, 2015, ISBN: 978-1-4503-3527-0.
@inproceedings{kang_hi_2015,
title = {"Hi, It's Me Again!": Virtual Coaches over Mobile Video},
author = {Sin-Hwa Kang and David M. Krum and Thai Phan and Mark Bolas},
url = {http://dl.acm.org/citation.cfm?id=2814970},
isbn = {978-1-4503-3527-0},
year = {2015},
date = {2015-10-01},
booktitle = {Proceedings of the 3rd International Conference on Human-Agent Interaction},
pages = {183--186},
publisher = {ACM},
address = {Daegu, Korea},
abstract = {We believe that virtual humans presented over video chat services, such as Skype via smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. We hypothesize that the context of a smartphone communication channel, i.e. how a virtual human is presented within a smartphone app, and indeed, the nature of that app, can profoundly affect how a real human perceives the virtual human. We have built an apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we are examining effective designs and social implications of virtual humans that interact over mobile video. The current study examines a relationship involving repeated counseling-style interactions with a virtual human, leveraging the virtual human’s ability to call and interact with a real human on multiple occasions over a period of time. The results and implications of this preliminary study suggest that repeated interactions may improve perceived social characteristics of the virtual human.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance Inproceedings
In: Eurographics Symposium on Virtual Environments (2015), pp. 93–100, The Eurographics Association, Kyoto, Japan, 2015, ISBN: 978-3-905674-84-2.
@inproceedings{azmandian_physical_2015,
title = {Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {https://diglib.eg.org/handle/10.2312/13833},
doi = {10.2312/egve.20151315},
isbn = {978-3-905674-84-2},
year = {2015},
date = {2015-10-01},
booktitle = {Eurographics Symposium on Virtual Environments (2015)},
pages = {93--100},
publisher = {The Eurographics Association},
address = {Kyoto, Japan},
abstract = {Redirected walking provides a compelling solution to explore large virtual environments in a natural way. However, research literature provides few guidelines regarding trade-offs involved in selecting size and layout for physical tracked space. We designed a rigorously controlled benchmarking framework and conducted two simulated user experiments to systematically investigate how the total area and dimensions of the tracked space affect performance of steer-to-center and steer-to-orbit algorithms. The results indicate that minimum viable size of physical tracked space for these redirected walking algorithms is approximately 6m × 6m with performance continuously improving in larger tracked spaces. At the same time, no “optimal” tracked space size can guarantee the absence of contacts with the boundary. We also found that square tracked spaces enabled best overall performance with steer-to-center algorithm also performing well in moderately elongated rectangular spaces. Finally, we demonstrate that introducing translation gains can provide a useful boost in performance, particularly when physical space is constrained. We conclude with the discussion of potential applications of our benchmarking toolkit to other problems related to performance of redirected walking platforms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
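The steer-to-center strategy benchmarked here can be summarized in a few lines: each step, the world is rotated slightly so the user's physical heading bends toward the middle of the tracked space, bounded by a curvature limit. The sketch below is our simplification; the 2.6 deg/m bound is an often-cited estimate from the redirected walking literature, not a value from this paper.

    import math

    MAX_CURVATURE_DEG_PER_M = 2.6    # assumed imperceptibility bound

    def steer_to_center_rotation(pos, heading_rad, step_length_m, center=(0.0, 0.0)):
        """Return the world rotation (degrees) to inject for one step, signed so
        that the walker's physical path curves toward the tracked-space center."""
        to_center = math.atan2(center[1] - pos[1], center[0] - pos[0])
        error = (to_center - heading_rad + math.pi) % (2 * math.pi) - math.pi
        budget = MAX_CURVATURE_DEG_PER_M * step_length_m
        return math.copysign(min(abs(math.degrees(error)), budget), error)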
Honig, Wolfgang; Milanes, Christina; Scaria, Lisa; Phan, Thai; Bolas, Mark; Ayanian, Nora
Mixed Reality for Robotics Inproceedings
In: 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5382–5387, IEEE, Hamburg, Germany, 2015.
@inproceedings{honig_mixed_2015,
title = {Mixed Reality for Robotics},
author = {Wolfgang Honig and Christina Milanes and Lisa Scaria and Thai Phan and Mark Bolas and Nora Ayanian},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7354138&tag=1},
doi = {10.1109/IROS.2015.7354138},
year = {2015},
date = {2015-09-01},
booktitle = {2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {5382--5387},
publisher = {IEEE},
address = {Hamburg, Germany},
abstract = {Mixed Reality can be a valuable tool for research and development in robotics. In this work, we refine the definition of Mixed Reality to accommodate seamless interaction between physical and virtual objects in any number of physical or virtual environments. In particular, we show that Mixed Reality can reduce the gap between simulation and implementation by enabling the prototyping of algorithms on a combination of physical and virtual objects, including robots, sensors, and humans. Robots can be enhanced with additional virtual capabilities, or can interact with humans without sharing physical space. We demonstrate Mixed Reality with three representative experiments, each of which highlights the advantages of our approach. We also provide a testbed for Mixed Reality with three different virtual robotics environments in combination with the Crazyflie 2.0 quadcopter.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Inproceedings
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1--1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high angular density over a wide field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore “time-offset” interactions with recorded 3D human subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Xueming; Wang, Shanhe; Busch, Jay; Phan, Thai; McSheery, Tracy; Bolas, Mark; Debevec, Paul
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking Inproceedings
In: Proceedings of ACM SIGGRAPH 2015 Posters, pp. 94, ACM, Los Angeles, CA, 2015.
@inproceedings{yu_virtual_2015,
title = {Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking},
author = {Xueming Yu and Shanhe Wang and Jay Busch and Thai Phan and Tracy McSheery and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Posters},
pages = {94},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focussed, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
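The core geometric step of such a system is aiming each pan/tilt mirror at a tracked head position. The sketch below computes the aim angles for a fixed camera position; the coordinate conventions and names are assumptions for illustration, not the authors' implementation.

    import math

    def pan_tilt_to_target(cam_pos, head_pos):
        """Return (pan, tilt) in degrees aiming from cam_pos toward head_pos,
        with pan measured in the ground plane and tilt as elevation above it."""
        dx = head_pos[0] - cam_pos[0]
        dy = head_pos[1] - cam_pos[1]
        dz = head_pos[2] - cam_pos[2]
        pan = math.degrees(math.atan2(dy, dx))
        tilt = math.degrees(math.atan2(dz, math.hypot(dx, dy)))
        return pan, tilt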
Grechkin, Timofey; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Towards Context-Sensitive Reorientation for Real Walking in Virtual Reality Inproceedings
In: 2015 IEEE Virtual Reality (VR), pp. 185–186, IEEE, Arles, France, 2015, ISBN: 978-1-4799-1727-3.
@inproceedings{grechkin_towards_2015,
title = {Towards Context-Sensitive Reorientation for Real Walking in Virtual Reality},
author = {Timofey Grechkin and Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7223357},
doi = {10.1109/VR.2015.7223357},
isbn = {978-1-4799-1727-3},
year = {2015},
date = {2015-03-01},
booktitle = {2015 IEEE Virtual Reality (VR)},
pages = {185--186},
publisher = {IEEE},
address = {Arles, France},
abstract = {Redirected walking techniques have been introduced to overcome physical limitations for natural locomotion in virtual reality. Although subtle perceptual manipulations are helpful to keep users within relatively small tracked spaces, it is inevitable that users will approach critical boundary limits. Current solutions to this problem involve breaks in presence by introducing distractors, or freezing the virtual world relative to the user’s perspective. We propose an approach that integrates into the virtual world narrative to draw users’ attention and to cause them to temporarily alter their course to avoid going off bounds. This method ties together unnoticeable translation, rotation, and curvature gains, efficiently reorienting the user while maintaining the user’s sense of immersion. We also discuss how this new method can be effectively used in conjunction with other reorientation techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan A.; Krum, David M.; Richmond, Todd; Bolas, Mark
The MxR Lab at the USC Institute for Creative Technologies Book
Arles, France, 2015.
@book{suma_mxr_2015,
title = {The MxR Lab at the USC Institute for Creative Technologies},
author = {Evan A. Suma and David M. Krum and Todd Richmond and Mark Bolas},
url = {http://ict.usc.edu/pubs/The%20MxR%20Lab%20at%20the%20USC%20Institute%20for%20Creative%20Technologies.pdf},
year = {2015},
date = {2015-03-01},
address = {Arles, France},
abstract = {The MxR Lab at the University of Southern California explores techniques and technologies to improve the fluency of human-computer interactions and create engaging and effective synthetic experiences. With a research facility at the Institute for Creative Technologies as well as the satellite MxR Studio at the School of Cinematic Arts, this unique environment facilitates cross-disciplinary teams from computer science, engineering, communications, and cinema. The MxR Lab philosophy begins with rapid prototyping and playful exploration that progressively evolves to more refined development pipelines, formal research studies, and eventual dissemination through academic papers and open-source initiatives. We also sometimes engage in large-scale Nerf battles.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Rhuizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Inproceedings
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Rhuizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134--134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimating it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
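The registration step described in the abstract finds dense correspondences between face scans. As a stand-in for that component, the sketch below runs OpenCV's Farneback dense optical flow between two scan texture images; the file names are hypothetical, and the paper's actual pipeline also relies on facial landmark detection.

    import cv2

    # Hypothetical texture images rendered from two scans of the same subject.
    neutral = cv2.imread("scan_neutral_texture.png", cv2.IMREAD_GRAYSCALE)
    smile = cv2.imread("scan_smile_texture.png", cv2.IMREAD_GRAYSCALE)

    # Dense flow: a 2D offset per pixel from the neutral texture into the
    # expression texture, usable as correspondences after a per-vertex lookup.
    # Positional args: flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags.
    flow = cv2.calcOpticalFlowFarneback(neutral, smile, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    print(flow.shape)    # (height, width, 2) pixel offsets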
Feng, Andrew; Lucas, Gale; Marsella, Stacy; Suma, Evan; Chiu, Chung-Cheng; Casas, Dan; Shapiro, Ari
Acting the Part: The Role of Gesture on Avatar Identity Inproceedings
In: Proceedings of the Seventh International Conference on Motion in Games (MIG 2014), pp. 49–54, ACM Press, Playa Vista, CA, 2014, ISBN: 978-1-4503-2623-0.
@inproceedings{feng_acting_2014,
title = {Acting the Part: The Role of Gesture on Avatar Identity},
author = {Andrew Feng and Gale Lucas and Stacy Marsella and Evan Suma and Chung-Cheng Chiu and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2668064.2668102},
doi = {10.1145/2668064.2668102},
isbn = {978-1-4503-2623-0},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the Seventh International Conference on Motion in Games (MIG 2014)},
pages = {49--54},
publisher = {ACM Press},
address = {Playa Vista, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. However, in order to generate a recognizable 3D avatar, the movement and behavior of the human subject should be captured and replicated as well. We present a method of generating a 3D model from a scan, as well as a method to incorporate a subject’s style of gesturing into a 3D character. We present a study which shows that 3D characters that used the gestural style of their original human subjects were more recognizable as the original subject than those that did not.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Chih-Fan; Spicer, Ryan; Yahata, Rhys; Bolas, Mark; Suma, Evan
Real-time and Robust Grasping Detection Inproceedings
In: Proceedings of the 2nd ACM symposium on Spatial user interaction, pp. 159–159, ACM, Honolulu, HI, 2014.
@inproceedings{chen_real-time_2014,
title = {Real-time and Robust Grasping Detection},
author = {Chih-Fan Chen and Ryan Spicer and Rhys Yahata and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Real-Time%20and%20Robust%20Grasping%20Detection.pdf},
year = {2014},
date = {2014-10-01},
booktitle = {Proceedings of the 2nd ACM symposium on Spatial user interaction},
pages = {159--159},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {Depth-based gesture cameras provide a promising and novel way to interface with computers. Nevertheless, this type of interaction remains challenging due to the complexity of finger interactions under large viewpoint variations. Existing middleware such as Intel Perceptual Computing SDK (PCSDK) or SoftKinetic IISU can provide abundant hand tracking and gesture information. However, the data is too noisy (Fig. 1, left) for consistent and reliable use in our application. In this work, we present a filtering approach that combines several features from PCSDK to achieve more stable hand openness and supports grasping interactions in virtual environments. A support vector machine (SVM), a machine learning method, is used to achieve better accuracy in a single frame, and a Markov Random Field (MRF), a probabilistic model, is used to stabilize and smooth the sequential output. Our experimental results verify the effectiveness and the robustness of our method.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
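The abstract's SVM-plus-MRF pipeline can be approximated in a few lines: a per-frame classifier followed by temporal smoothing. The sketch below uses scikit-learn's SVC and, in place of the MRF, a simple majority vote over recent frames; the feature vectors and labels are placeholders, not the paper's data.

    from collections import deque
    import numpy as np
    from sklearn.svm import SVC

    # Placeholder training data: per-frame hand features and open/grasp labels.
    X_train = np.random.rand(200, 6)
    y_train = np.random.randint(0, 2, 200)      # 0 = open hand, 1 = grasping
    clf = SVC(kernel="rbf").fit(X_train, y_train)

    history = deque(maxlen=7)                   # sliding window of raw decisions

    def classify_frame(features):
        """features: 1-D numpy array for the current frame. Returns the smoothed
        grasp state after a majority vote over the last few frames."""
        history.append(int(clf.predict(features.reshape(1, -1))[0]))
        return int(sum(history) > len(history) / 2)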
Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Countering User Deviation During Redirected Walking Inproceedings
In: Proceedings of the ACM Symposium on Applied Perception, Vancouver, British Columbia, Canada, 2014.
@inproceedings{azmandian_countering_2014,
title = {Countering User Deviation During Redirected Walking},
author = {Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Countering%20User%20Deviation%20During%20Redirected%20Walking.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
address = {Vancouver, British Columbia, Canada},
abstract = {Redirected Walking is a technique that leverages human perception characteristics to allow locomotion in virtual environments larger than the tracking area. Among the many redirection techniques, some strictly depend on the user’s current position and orientation, while more recent algorithms also depend on the user’s predicted behavior. This prediction serves as an input to a computationally expensive search to determine an optimal path. The search output is formulated as a series of gains to be applied at different stages along the path. For example, if a user is walking down a corridor, a natural prediction would be that she will walk along a straight line down the corridor and will choose one of the possible directions with equal probability. In practice, deviations from the expected virtual path are inevitable, and as a result, the real world path traversed will differ from the original prediction. These deviations can not only force the search to select a less optimal path in the next iteration, but in some cases also cause the user to go off bounds, requiring resets that create a jarring experience for the user. We propose a method to account for these deviations by modifying the redirection gains per update frame, aiming to keep the user on the intended predicted physical path.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
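One way to read "modifying the redirection gains per update frame" is as a small proportional controller: measure how far the user has drifted from the predicted physical path and bias the curvature gain to pull them back. The constants below are illustrative assumptions, not values from the paper.

    def corrected_curvature(base_curvature, lateral_deviation_m, k_p=0.5, limit=0.15):
        """Return an adjusted curvature gain (radians per meter) nudged toward the
        planned physical path. lateral_deviation_m is the signed distance from the
        prediction; the correction is clamped so the redirection stays subtle."""
        correction = max(-limit, min(limit, k_p * lateral_deviation_m))
        return base_curvature + correction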
Shapiro, Ari; Feng, Andrew; Wang, Ruizhe; Li, Hao; Bolas, Mark; Medioni, Gerard; Suma, Evan
Rapid avatar capture and simulation using commodity depth sensors Journal Article
In: Computer Animation and Virtual Worlds, vol. 25, no. 3-4, pp. 201–211, 2014, ISSN: 15464261.
@article{shapiro_rapid_2014,
title = {Rapid avatar capture and simulation using commodity depth sensors},
author = {Ari Shapiro and Andrew Feng and Ruizhe Wang and Hao Li and Mark Bolas and Gerard Medioni and Evan Suma},
url = {http://ict.usc.edu/pubs/Rapid%20Avatar%20Capture%20and%20Simulation%20Using%20Commodity%20Depth%20Sensors.pdf},
doi = {10.1002/cav.1579},
issn = {15464261},
year = {2014},
date = {2014-05-01},
journal = {Computer Animation and Virtual Worlds},
volume = {25},
number = {3-4},
pages = {201--211},
abstract = {We demonstrate a method of acquiring a 3D model of a human using commodity scanning hardware and then controlling that 3D figure in a simulated environment in only a few minutes. The model acquisition requires four static poses taken at 90 degree angles relative to each other. The 3D model is then given a skeleton and smooth binding information necessary for control and simulation. The 3D models that are captured are suitable for use in applications where recognition and distinction among characters by shape, form, or clothing is important, such as small group or crowd simulations or other socially oriented applications. Because of the speed at which a human figure can be captured and the low hardware requirements, this method can be used to capture, track, and model human figures as their appearances change over time.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Krum, David M.; Kang, Sin-Hwa; Bolas, Mark
Virtual Coaches over Mobile Video Inproceedings
In: Proceedings of International Conference on Computer Animation and Social Agents (CASA), 2014.
@inproceedings{krum_virtual_2014,
title = {Virtual Coaches over Mobile Video},
author = {David M. Krum and Sin-Hwa Kang and Mark Bolas},
url = {http://ict.usc.edu/pubs/Virtual%20Coaches%20over%20Mobile%20Video.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of International Conference on Computer Animation and Social Agents (CASA)},
abstract = {We hypothesize that the context of a smartphone, how a virtual human is presented within a smartphone app, and indeed, the nature of that app, can profoundly affect how the virtual human is perceived by a real human. We believe that virtual humans, presented over video chat services (such as Skype) and delivered using mobile phones, can be an effective way to deliver coaching applications. We propose to build a prototype system that allows virtual humans to initiate and receive Skype calls. This hardware will enable broadcast of the audio and video imagery of a character. Using this platform and a virtual human, we will conduct two user studies. The first study will examine factors involved in making a mobile video based character seem engaging and “real”. This study will examine how character appearance and the artifacts of the communication channel, such as video and audio quality, can affect rapport with a virtual human. The second study will examine ways to maintain a long-term relationship with a character, leveraging the character’s ability to call and interact with a real human over a longer period of time. These studies will help develop design guidelines for presenting virtual humans over mobile video.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Yahata, Rhys; Bolas, Mark; Suma, Evan
An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments Inproceedings
In: IEEE Virtual Reality 2014, pp. 65–66, 2014.
@inproceedings{azmandian_enhanced_2014,
title = {An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments},
author = {Mahdi Azmandian and Rhys Yahata and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/An%20Enhanced%20Steering%20Algorithm%20for%20Redirected%20Walking%20in%20Virtual%20Environments.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {IEEE Virtual Reality 2014},
pages = {65--66},
abstract = {Redirected walking techniques enable natural locomotion through immersive virtual environments that are considerably larger than the available real world walking space. However, the most effective strategy for steering the user remains an open question, as most previously presented algorithms simply redirect toward the center of the physical space. In this work, we present a theoretical framework that plans a walking path through a virtual environment and calculates the parameters for combining translation, rotation, and curvature gains such that the user can traverse a series of defined waypoints efficiently based on a utility function. This function minimizes the number of overt reorientations to avoid introducing potential breaks in presence. A notable advantage of this approach is that it leverages knowledge of the layout of both the physical and virtual environments to enhance the steering strategy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
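At its core, the planning step described above scores candidate combinations of translation, rotation, and curvature gains and keeps the plan whose simulated traversal of the waypoints needs the fewest overt reorientations. A minimal sketch of that selection loop, with the candidate generation and simulator left abstract:

    def pick_best_plan(candidate_plans, count_reorientations):
        """candidate_plans: iterable of gain-parameter settings.
        count_reorientations: function that simulates a plan over the waypoint
        sequence and returns how many overt reorientations it would require."""
        best_plan, best_cost = None, float("inf")
        for plan in candidate_plans:
            cost = count_reorientations(plan)
            if cost < best_cost:
                best_plan, best_cost = plan, cost
        return best_plan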
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
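The step of smoothly interpolating viewer heights and distances on a per-vertex basis can be pictured as a simple interpolation over azimuth: each rendered vertex looks up the tracked viewers nearest to its direction and blends their heights. The sketch below is our one-dimensional simplification (height only); names are assumptions, not the paper's shader code.

    def interpolated_viewer_height(vertex_azimuth_deg, viewers):
        """viewers: list of (azimuth_deg, height_m) tuples sorted by azimuth.
        Returns a viewer height for this vertex so neighboring projectors agree."""
        lo, hi = viewers[0], viewers[-1]
        for a, b in zip(viewers, viewers[1:]):
            if a[0] <= vertex_azimuth_deg <= b[0]:
                lo, hi = a, b
                break
        if hi[0] == lo[0]:
            return lo[1]
        t = (vertex_azimuth_deg - lo[0]) / (hi[0] - lo[0])
        t = max(0.0, min(1.0, t))          # clamp outside the spanned range
        return (1 - t) * lo[1] + t * hi[1]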
2010
Bolas, Mark; Olson, Logan
Design Approach for Multi-Touch Interfaces in Creative Production Environments Inproceedings
In: Workshop of the ACM SIGCHI Symposium on Engineering Interactive Computing Systems, Berlin, Germany, 2010.
@inproceedings{bolas_design_2010,
title = {Design Approach for Multi-Touch Interfaces in Creative Production Environments},
author = {Mark Bolas and Logan Olson},
year = {2010},
date = {2010-06-01},
booktitle = {Workshop of the ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
address = {Berlin, Germany},
abstract = {Multi-touch gained a lot of interest in the last couple of years and the increased availability of multi-touch enabled hardware boosted its development. However, the current diversity of hardware, toolkits, and tools for creating multi- touch interfaces has its downsides: there is only little reusable material and no generally accepted body of knowledge when it comes to the development of multi- touch interfaces. This workshop seeks a consensus on methods, approaches, toolkits, and tools that aid in the engineering of multi-touch interfaces and transcend the differences in available platforms. The patterns mentioned in the title indicate that we are aiming to create a reusable body of knowledge.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Bolas, Mark; Krum, David M.
Augmented Reality Applications and User Interfaces Using Head-Coupled Near-Axis Personal Projectors with Novel Retroreflective Props and Surfaces Inproceedings
In: Pervasive 2010 Ubiprojection Workshop, 2010.
@inproceedings{bolas_augmented_2010,
title = {Augmented Reality Applications and User Interfaces Using Head-Coupled Near-Axis Personal Projectors with Novel Retroreflective Props and Surfaces},
author = {Mark Bolas and David M. Krum},
url = {http://ict.usc.edu/pubs/Augmented%20Reality%20Applications%20and%20User%20Interfaces%20Using%20Head-Coupled%20Near-Axis%20Personal%20Projectors%20with%20Novel%20Retroreflective%20Props%20and%20Surfaces.pdf},
year = {2010},
date = {2010-05-01},
booktitle = {Pervasive 2010 Ubiprojection Workshop},
abstract = {One motivation for the development of augmented reality technology has been the support of more realistic and flexible training simulations. Computer-generated characters and environments – combined with real world elements such as furniture and props to 'set the stage' – create the emotional, cognitive, and physical challenges necessary for well-rounded team-based training. This paper presents REFLCT, a mixed reality staging and display system that couples an unusual near-axis personal projector design with novel retroreflective props and surfaces. The system enables viewer-specific imagery to be composited directly into and onto a surrounding environment, without optics positioned in front of the user's eyes or face. Characterized as a stealth projector, it unobtrusively offers bright images with low power consumption. In addition to training applications, the approach appears to be well-matched with emerging user interface and application domains, such as asymmetric collaborative workspaces and mobile personalized guides.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Bolas, Mark
The Isolated Practitioner Inproceedings
In: ACM CHI 2010 Workshop on Researcher-Practitioner Interaction, 2010.
@inproceedings{krum_isolated_2010,
title = {The Isolated Practitioner},
author = {David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/The%20Isolated%20Practitioner.pdf},
year = {2010},
date = {2010-04-01},
booktitle = {ACM CHI 2010 Workshop on Researcher-Practitioner Interaction},
abstract = {Over the past few decades, a community of researchers and professionals has been advancing the art and science of interaction design. Unfortunately, many practitioners are isolated from this community. We feel that the lack of a relationship between these isolated practitioners and the human-computer interaction community is one of the greater challenges in improving the overall quality of interaction design in the products and services used by our society. In this position paper, we describe how this isolation arises. We then propose ways to improve the connection between the HCI community and these isolated practitioners. These include early HCI instruction in the undergraduate curriculum, establishing HCI certificate programs, utilizing new media to summarize and disseminate important HCI results, highlighting accomplishments in interaction design, and performing other forms of outreach.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Sadek, Ramy; Kohli, Luv; Olson, Logan; Bolas, Mark
Experiments in Mixed Reality Inproceedings
In: Proceedings of SPIE Electronic Imaging Science and Technology Conference, 2010.
@inproceedings{krum_experiments_2010,
title = {Experiments in Mixed Reality},
author = {David M. Krum and Ramy Sadek and Luv Kohli and Logan Olson and Mark Bolas},
url = {http://ict.usc.edu/pubs/Experiments%20in%20Mixed%20Reality.pdf},
doi = {10.1117/12.844904},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of SPIE Electronic Imaging Science and Technology Conference},
abstract = {As part of the Institute for Creative Technologies and the School of Cinematic Arts at the University of Southern California, the Mixed Reality lab develops technologies and techniques for presenting realistic immersive training experiences. Such experiences typically place users within a complex ecology of social actors, physical objects, and collections of intents, motivations, relationships, and other psychological constructs. Currently, it remains infeasible to completely synthesize the interactivity and sensory signatures of such ecologies. For this reason, the lab advocates mixed reality methods for training and conducts experiments exploring such methods. Currently, the lab focuses on understanding and exploiting the elasticity of human perception with respect to representational differences between real and virtual environments. This paper presents an overview of three projects: techniques for redirected walking, displays for the representation of virtual humans, and audio processing to increase stress.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
Jones, Andrew; Lang, Magnus; Fyffe, Graham; Yu, Xueming; Busch, Jay; McDowall, Ian; Bolas, Mark; Debevec, Paul
Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System Journal Article
In: ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009, vol. 28, no. 3, 2009.
@article{jones_achieving_2009,
title = {Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System},
author = {Andrew Jones and Magnus Lang and Graham Fyffe and Xueming Yu and Jay Busch and Ian McDowall and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Achieving%20Eye%20Contact%20in%20a%20One-to-Many%203D%20Video%20Teleconferencing%20System.pdf},
year = {2009},
date = {2009-08-01},
journal = {ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009},
volume = {28},
number = {3},
abstract = {We present a set of algorithms and an associated display system capable of producing correctly rendered eye contact between a three-dimensionally transmitted remote participant and a group of observers in a 3D teleconferencing system. The participant's face is scanned in 3D at 30Hz and transmitted in real time to an autostereoscopic horizontal-parallax 3D display, displaying him or her over more than a 180° field of view observable to multiple observers. To render the geometry with correct perspective, we create a fast vertex shader based on a 6D lookup table for projecting 3D scene vertices to a range of subject angles, heights, and distances. We generalize the projection mathematics to arbitrarily shaped display surfaces, which allows us to employ a curved concave display surface to focus the high speed imagery to individual observers. To achieve two-way eye contact, we capture 2D video from a cross-polarized camera reflected to the position of the virtual participant's eyes, and display this 2D video feed on a large screen in front of the real participant, replicating the viewpoint of their virtual self. To achieve correct vertical perspective, we further leverage this image to track the position of each audience member's eyes, allowing the 3D display to render correct vertical perspective for each of the viewers around the device. The result is a one-to-many 3D teleconferencing system able to reproduce the effects of gaze, attention, and eye contact generally missing in traditional teleconferencing systems.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {article}
}
Krum, David M.; Piepol, Diane; Bolas, Mark
Sharing and Stretching Space with Full Body Tracking Inproceedings
In: Whole Body Interaction 2009, A SIGCHI 2009 Workshop, 2009.
@inproceedings{krum_sharing_2009,
title = {Sharing and Stretching Space with Full Body Tracking},
author = {David M. Krum and Diane Piepol and Mark Bolas},
url = {http://ict.usc.edu/pubs/Sharing%20and%20Stretching%20Space%20with%20Full%20Body%20Tracking.pdf},
year = {2009},
date = {2009-04-01},
booktitle = {Whole Body Interaction 2009, A SIGCHI 2009 Workshop},
abstract = {New opportunities emerge when mixed reality environments are augmented with wide field of view displays and full body, real-time tracking. Such systems will allow users to see a correctly tracked representation of themselves in the virtual environment, and allow users to "share space" with other virtual humans in the virtual environment. Furthermore, such systems will be able to use tracking data to identify opportunities when a user's perception of the environment can be altered. This would be helpful in situations where redirection or reorientation of the user might be done to "stretch space," i.e. imperceptibly rotating the environment around the user, so that a straight-line walk becomes a curve, preventing the user from ever encountering walls in the physical space. We believe that allowing users to co-inhabit virtual spaces with virtual humans and decoupling physical size constraints from these virtual spaces are two important building blocks for effective mixed reality training experiences.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Bolas, Mark; Lange, Belinda; Dallas, I.; Rizzo, Albert
Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system Inproceedings
In: Virtual Rehabilitation, pp. 72, Vancouver, CA, 2008.
@inproceedings{bolas_engaging_2008,
title = {Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system},
author = {Mark Bolas and Belinda Lange and I. Dallas and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Engaging%20breathing%20exercises-%20developing%20an%20interactive%20XNA-based%20air%20flow%20sensing%20and%20control%20system.jpg},
year = {2008},
date = {2008-08-01},
booktitle = {Virtual Rehabilitation},
pages = {72},
address = {Vancouver, CA},
abstract = {The aim of this project was to make breathing exercises for children with Cystic Fibrosis fun. We developed a prototype device that uses breathing to control specifically designed video games.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
2007
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
An Interactive 360° Light Field Display Inproceedings
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{jones_interactive_2007,
title = {An Interactive 360° Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
abstract = {While a great deal of computer generated imagery is modeled and rendered in 3D, the vast majority of this 3D imagery is shown on 2D displays. Various forms of 3D displays have been contemplated and constructed for at least one hundred years [Lippman 1908], but only recent evolutions in digital capture, computation, and display have made functional and practical 3D displays possible.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Chabert, Charles-Felix; Bolas, Mark; Peers, Pieter; Debevec, Paul
A system for high-resolution face scanning based on polarized spherical illumination Inproceedings
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{ma_system_2007,
title = {A system for high-resolution face scanning based on polarized spherical illumination},
author = {Wan-Chun Ma and Tim Hawkins and Charles-Felix Chabert and Mark Bolas and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20system%20for%20high-resolution%20face%20scanning%20based%20on%20polarized%20spherical%20illumination.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
Rendering for an Interactive 360 Degree Light Field Display Inproceedings
In: ACM SIGGRAPH conference proceedings, San Diego, CA, 2007.
@inproceedings{jones_rendering_2007,
title = {Rendering for an Interactive 360 Degree Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Rendering%20for%20an%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {ACM SIGGRAPH conference proceedings},
address = {San Diego, CA},
abstract = {We describe a set of rendering techniques for an autostereoscopic light field display able to present interactive 3D graphics to multiple simultaneous viewers 360 degrees around the display. The display consists of a high-speed video projector, a spinning mirror covered by a holographic diffuser, and FPGA circuitry to decode specially rendered DVI video signals. The display uses a standard programmable graphics card to render over 5,000 images per second of interactive 3D graphics, projecting 360-degree views with 1.25 degree separation up to 20 updates per second. We describe the system's projection geometry and its calibration process, and we present a multiple-center-of-projection rendering technique for creating perspective-correct images from arbitrary viewpoints around the display. Our projection technique allows correct vertical perspective and parallax to be rendered for any height and distance when these parameters are known, and we demonstrate this effect with interactive raster graphics using a tracking system to measure the viewer's height and distance. We further apply our projection technique to the display of photographed light fields with accurate horizontal and vertical parallax. We conclude with a discussion of the display's visual accommodation performance and discuss techniques for displaying color imagery.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
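The display figures quoted in the abstract are internally consistent, which a quick calculation confirms: 360 degrees of views at 1.25 degree separation gives 288 distinct views, and refreshing all of them 20 times per second requires about 5,760 rendered images per second, matching the stated "over 5,000 images per second".

    # Quick arithmetic check of the figures quoted in the abstract above.
    views_per_revolution = 360 / 1.25              # 288 distinct angular views
    images_per_second = views_per_revolution * 20  # 20 full updates per second
    print(views_per_revolution, images_per_second) # 288.0 5760.0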
2006
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Inproceedings
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{debevec_concave_2006,
title = {Concave Surround Optics for Rapid Multi-View Imaging},
author = {Paul Debevec and Mark Bolas and Ian McDowall},
url = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors to simulate the appearance of camera movement around a scene while the physical camera remains stationary. The system thus is amenable to capturing dynamic events avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high speed video of a dynamic scene. We show smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare with alternate setups.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Inproceedings
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
@inproceedings{jones_simulating_2006,
title = {Simulating Spatially Varying Lighting on a Live Performance},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
address = {London, UK},
abstract = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Inproceedings
In: Eurographics Symposium on Rendering (2006), 2006.
@inproceedings{einarsson_relighting_2006,
title = {Relighting Human Locomotion with Flowed Reflectance Fields},
author = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
url = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Eurographics Symposium on Rendering (2006)},
abstract = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Bolas, Mark; Pair, Jarrell; Haynes, Kip; McDowall, Ian
Display Research at the University of Southern California Inproceedings
In: IEEE Emerging Displays Workshop, Alexandria, VA, 2006.
@inproceedings{bolas_display_2006,
title = {Display Research at the University of Southern California},
author = {Mark Bolas and Jarrell Pair and Kip Haynes and Ian McDowall},
url = {http://ict.usc.edu/pubs/Display%20Research%20at%20the%20University%20of%20Southern%20California.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {IEEE Emerging Displays Workshop},
address = {Alexandria, VA},
abstract = {The University of Southern California and its collaborative research partner, Fakespace Labs, are participating in a number of research programs to invent and implement new forms of display technologies for immersive and semi-immersive applications. This paper briefly describes three of these technologies and highlights a few emerging results from those efforts. The first system is a rear projected 300 degree field of view cylindrical display. It is driven by 11 projectors with geometry correction and edge blending hardware. A full scale prototype will be completed in March 2006. The second system is a 14 screen projected panoramic room environment used as an advanced teaching and meeting space. It can be driven by a cluster of personal computers or low-cost DVD players, or driven by a single personal computer. The third is a prototype stereoscopic head mounted display that can be worn in a fashion similar to standard dust protection goggles. It provides a field of view in excess of 150 degrees.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
2005
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Inproceedings
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
@inproceedings{jones_performance_2005,
title = {Performance Geometry Capture for Spatially Varying Relighting},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {SIGGRAPH 2005 Sketch},
address = {Los Angeles, CA},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}