Publications
Bolas, Mark; Olson, Logan
Design Approach for Multi-Touch Interfaces in Creative Production Environments Inproceedings
In: Workshop of the ACM SIGCHI Symposium on Engineering Interactive Computing Systems, Berlin, Germany, 2010.
@inproceedings{bolas_design_2010,
title = {Design Approach for Multi-Touch Interfaces in Creative Production Environments},
author = {Mark Bolas and Logan Olson},
year = {2010},
date = {2010-06-01},
booktitle = {Workshop of the ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
address = {Berlin, Germany},
abstract = {Multi-touch has gained a lot of interest in the last couple of years, and the increased availability of multi-touch enabled hardware has boosted its development. However, the current diversity of hardware, toolkits, and tools for creating multi-touch interfaces has its downsides: there is little reusable material and no generally accepted body of knowledge when it comes to the development of multi-touch interfaces. This workshop seeks a consensus on methods, approaches, toolkits, and tools that aid in the engineering of multi-touch interfaces and transcend the differences in available platforms. The patterns mentioned in the title indicate that we are aiming to create a reusable body of knowledge.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bolas, Mark; Krum, David M.
Augmented Reality Applications and User Interfaces Using Head-Coupled Near-Axis Personal Projectors with Novel Retroreflective Props and Surfaces Inproceedings
In: Pervasive 2010 Ubiprojection Workshop, 2010.
@inproceedings{bolas_augmented_2010,
title = {Augmented Reality Applications and User Interfaces Using Head-Coupled Near-Axis Personal Projectors with Novel Retroreflective Props and Surfaces},
author = {Mark Bolas and David M. Krum},
url = {http://ict.usc.edu/pubs/Augmented%20Reality%20Applications%20and%20User%20Interfaces%20Using%20Head-Coupled%20Near-Axis%20Personal%20Projectors%20with%20Novel%20Retroreflective%20Props%20and%20Surfaces.pdf},
year = {2010},
date = {2010-05-01},
booktitle = {Pervasive 2010 Ubiprojection Workshop},
abstract = {One motivation for the development of augmented reality technology has been the support of more realistic and flexible training simulations. Computer-generated characters and environments – combined with real world elements such as furniture and props to 'set the stage' – create the emotional, cognitive, and physical challenges necessary for well-rounded team-based training. This paper presents REFLCT, a mixed reality staging and display system that couples an unusual near-axis personal projector design with novel retroreflective props and surfaces. The system enables viewer-specific imagery to be composited directly into and onto a surrounding environment, without optics positioned in front of the user's eyes or face. Characterized as a stealth projector, it unobtrusively offers bright images with low power consumption. In addition to training applications, the approach appears to be well-matched with emerging user interface and application domains, such as asymmetric collaborative workspaces and mobile personalized guides.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Bolas, Mark
The Isolated Practitioner Inproceedings
In: ACM CHI 2010 Workshop on Researcher-Practitioner Interaction, 2010.
@inproceedings{krum_isolated_2010,
title = {The Isolated Practitioner},
author = {David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/The%20Isolated%20Practitioner.pdf},
year = {2010},
date = {2010-04-01},
booktitle = {ACM CHI 2010 Workshop on Researcher-Practitioner Interaction},
abstract = {Over the past few decades, a community of researchers and professionals has been advancing the art and science of interaction design. Unfortunately, many practitioners are isolated from this community. We feel that the lack of a relationship between these isolated practitioners and the human-computer interaction community is one of the greater challenges in improving the overall quality of interaction design in the products and services used by our society. In this position paper, we describe how this isolation arises. We then propose ways to improve the connection between the HCI community and these isolated practitioners. These include early HCI instruction in the undergraduate curriculum, establishing HCI certificate programs, utilizing new media to summarize and disseminate important HCI results, highlighting accomplishments in interaction design, and performing other forms of outreach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Sadek, Ramy; Kohli, Luv; Olson, Logan; Bolas, Mark
Experiments in Mixed Reality Inproceedings
In: Proceedings of SPIE Electronic Imaging Science and Technology Conference, 2010.
@inproceedings{krum_experiments_2010,
title = {Experiments in Mixed Reality},
author = {David M. Krum and Ramy Sadek and Luv Kohli and Logan Olson and Mark Bolas},
url = {http://ict.usc.edu/pubs/Experiments%20in%20Mixed%20Reality.pdf},
doi = {10.1117/12.844904},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of SPIE Electronic Imaging Science and Technology Conference},
abstract = {As part of the Institute for Creative Technologies and the School of Cinematic Arts at the University of Southern California, the Mixed Reality lab develops technologies and techniques for presenting realistic immersive training experiences. Such experiences typically place users within a complex ecology of social actors, physical objects, and collections of intents, motivations, relationships, and other psychological constructs. Currently, it remains infeasible to completely synthesize the interactivity and sensory signatures of such ecologies. For this reason, the lab advocates mixed reality methods for training and conducts experiments exploring such methods. Currently, the lab focuses on understanding and exploiting the elasticity of human perception with respect to representational differences between real and virtual environments. This paper presents an overview of three projects: techniques for redirected walking, displays for the representation of virtual humans, and audio processing to increase stress.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Lang, Magnus; Fyffe, Graham; Yu, Xueming; Busch, Jay; McDowall, Ian; Bolas, Mark; Debevec, Paul
Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System Journal Article
In: ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009, vol. 28, no. 3, 2009.
@article{jones_achieving_2009,
title = {Achieving Eye Contact in a One-to-Many 3D Video Teleconferencing System},
author = {Andrew Jones and Magnus Lang and Graham Fyffe and Xueming Yu and Jay Busch and Ian McDowall and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Achieving%20Eye%20Contact%20in%20a%20One-to-Many%203D%20Video%20Teleconferencing%20System.pdf},
year = {2009},
date = {2009-08-01},
journal = {ACM Transactions on Graphics (TOG) - Proceedings of ACM SIGGRAPH 2009},
volume = {28},
number = {3},
abstract = {We present a set of algorithms and an associated display system capable of producing correctly rendered eye contact between a three-dimensionally transmitted remote participant and a group of observers in a 3D teleconferencing system. The participant's face is scanned in 3D at 30Hz and transmitted in real time to an autostereoscopic horizontal-parallax 3D display, displaying him or her over more than a 180° field of view observable to multiple observers. To render the geometry with correct perspective, we create a fast vertex shader based on a 6D lookup table for projecting 3D scene vertices to a range of subject angles, heights, and distances. We generalize the projection mathematics to arbitrarily shaped display surfaces, which allows us to employ a curved concave display surface to focus the high speed imagery to individual observers. To achieve two-way eye contact, we capture 2D video from a cross-polarized camera reflected to the position of the virtual participant's eyes, and display this 2D video feed on a large screen in front of the real participant, replicating the viewpoint of their virtual self. To achieve correct vertical perspective, we further leverage this image to track the position of each audience member's eyes, allowing the 3D display to render correct vertical perspective for each of the viewers around the device. The result is a one-to-many 3D teleconferencing system able to reproduce the effects of gaze, attention, and eye contact generally missing in traditional teleconferencing systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
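The abstract's 6D lookup can be sketched as follows: a precomputed table maps (vertex x, y, z, viewer angle, height, distance) to projected screen coordinates, so per-vertex projection reduces to multilinear interpolation. A minimal Python sketch, assuming placeholder table axes and contents rather than the paper's actual shader or table layout:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

# Placeholder axes: vertex x, y, z; viewer angle (degrees), height, distance.
axes = [np.linspace(-1, 1, 8),
        np.linspace(-1, 1, 8),
        np.linspace(-1, 1, 8),
        np.linspace(0, 180, 16),
        np.linspace(-0.5, 0.5, 8),
        np.linspace(0.5, 2.0, 8)]
# Table values: a projected (u, v) screen coordinate per 6D grid point.
table = np.zeros([len(a) for a in axes] + [2])
project = RegularGridInterpolator(axes, table)
uv = project([[0.1, 0.0, -0.2, 90.0, 0.0, 1.0]])  # one vertex, one viewer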
Krum, David M.; Piepol, Diane; Bolas, Mark
Sharing and Stretching Space with Full Body Tracking Inproceedings
In: Whole Body Interaction 2009, A SIGCHI 2009 Workshop, 2009.
@inproceedings{krum_sharing_2009,
title = {Sharing and Stretching Space with Full Body Tracking},
author = {David M. Krum and Diane Piepol and Mark Bolas},
url = {http://ict.usc.edu/pubs/Sharing%20and%20Stretching%20Space%20with%20Full%20Body%20Tracking.pdf},
year = {2009},
date = {2009-04-01},
booktitle = {Whole Body Interaction 2009, A SIGCHI 2009 Workshop},
abstract = {New opportunities emerge when mixed reality environments are augmented with wide field of view displays and full body, real-time tracking. Such systems will allow users to see a correctly tracked representation of themselves in the virtual environment and to "share space" with other virtual humans in that environment. Furthermore, such systems will be able to use tracking data to identify opportunities when a user's perception of the environment can be altered. This would be helpful in situations where redirection or reorientation of the user might be done to "stretch space," i.e., imperceptibly rotating the environment around the user so that a straight-line walk becomes a curve, preventing the user from ever encountering walls in the physical space. We believe that allowing users to co-inhabit virtual spaces with virtual humans and decoupling physical size constraints from these virtual spaces are two important building blocks for effective mixed reality training experiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
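The "stretch space" redirection described above is, at its core, a small per-frame rotation of the virtual scene kept below the user's detection threshold. A minimal sketch, assuming an illustrative curvature gain (reported detection thresholds vary across the literature):

# Inject a slight scene rotation as the user walks, so a straight physical
# path becomes a curved virtual one. The gain constant is an assumption.
CURVATURE_GAIN_DEG_PER_M = 5.0  # degrees of scene rotation per meter walked

def scene_rotation_step(walk_speed_m_per_s: float, dt_s: float) -> float:
    """Degrees to rotate the environment around the user this frame."""
    return CURVATURE_GAIN_DEG_PER_M * walk_speed_m_per_s * dt_s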
Bolas, Mark; Lange, Belinda; Dallas, I.; Rizzo, Albert
Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system Inproceedings
In: Virtual Rehabilitation, pp. 72, Vancouver, CA, 2008.
@inproceedings{bolas_engaging_2008,
title = {Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system},
author = {Mark Bolas and Belinda Lange and I. Dallas and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Engaging%20breathing%20exercises-%20developing%20an%20interactive%20XNA-based%20air%20flow%20sensing%20and%20control%20system.jpg},
year = {2008},
date = {2008-08-01},
booktitle = {Virtual Rehabilitation},
pages = {72},
address = {Vancouver, CA},
abstract = {The aim of this project was to make breathing exercises for children with Cystic Fibrosis fun. We developed a prototype device that uses breathing to control specifically designed video games.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
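A minimal sketch of the breath-to-game mapping this abstract implies; the prototype was XNA-based, so this Python version, its names, and its constants are assumptions. The idea is to smooth the raw air-flow reading and normalize it against a calibrated peak so it can drive a game control in the range 0 to 1:

class BreathController:
    """Map a raw air-flow sensor reading to a 0..1 game control value."""

    def __init__(self, peak_flow: float, alpha: float = 0.2):
        self.peak_flow = peak_flow  # calibrated per player
        self.alpha = alpha          # exponential-smoothing factor
        self.level = 0.0

    def update(self, raw_flow: float) -> float:
        self.level = self.alpha * raw_flow + (1 - self.alpha) * self.level
        return max(0.0, min(1.0, self.level / self.peak_flow))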
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
An Interactive 360° Light Field Display Inproceedings
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{jones_interactive_2007,
title = {An Interactive 360° Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
abstract = {While a great deal of computer generated imagery is modeled and rendered in 3D, the vast majority of this 3D imagery is shown on 2D displays. Various forms of 3D displays have been contemplated and constructed for at least one hundred years [Lippmann 1908], but only recent evolutions in digital capture, computation, and display have made functional and practical 3D displays possible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Chabert, Charles-Felix; Bolas, Mark; Peers, Pieter; Debevec, Paul
A system for high-resolution face scanning based on polarized spherical illumination Inproceedings
In: SIGGRAPH, San Diego, CA, 2007.
@inproceedings{ma_system_2007,
title = {A system for high-resolution face scanning based on polarized spherical illumination},
author = {Wan-Chun Ma and Tim Hawkins and Charles-Felix Chabert and Mark Bolas and Pieter Peers and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20system%20for%20high-resolution%20face%20scanning%20based%20on%20polarized%20spherical%20illumination.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {SIGGRAPH},
address = {San Diego, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
Rendering for an Interactive 360 Degree Light Field Display Inproceedings
In: ACM SIGGRAPH conference proceedings, San Diego, CA, 2007.
@inproceedings{jones_rendering_2007,
title = {Rendering for an Interactive 360 Degree Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Rendering%20for%20an%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {ACM SIGGRAPH conference proceedings},
address = {San Diego, CA},
abstract = {We describe a set of rendering techniques for an autostereoscopic light field display able to present interactive 3D graphics to multiple simultaneous viewers 360 degrees around the display. The display consists of a high-speed video projector, a spinning mirror covered by a holographic diffuser, and FPGA circuitry to decode specially rendered DVI video signals. The display uses a standard programmable graphics card to render over 5,000 images per second of interactive 3D graphics, projecting 360-degree views with 1.25 degree separation up to 20 updates per second. We describe the system's projection geometry and its calibration process, and we present a multiple-center-of-projection rendering technique for creating perspective-correct images from arbitrary viewpoints around the display. Our projection technique allows correct vertical perspective and parallax to be rendered for any height and distance when these parameters are known, and we demonstrate this effect with interactive raster graphics using a tracking system to measure the viewer's height and distance. We further apply our projection technique to the display of photographed light fields with accurate horizontal and vertical parallax. We conclude with a discussion of the display's visual accommodation performance and discuss techniques for displaying color imagery.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
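The throughput figures in this abstract are internally consistent, as a quick arithmetic check shows: 360 degrees of coverage at 1.25-degree view separation yields 288 distinct views per revolution, and 288 views refreshed 20 times per second is 5,760 rendered images per second, i.e. "over 5,000".

views_per_revolution = 360 / 1.25              # 288 distinct views
images_per_second = views_per_revolution * 20  # 5760 images per second
print(int(views_per_revolution), int(images_per_second))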
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Inproceedings
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{debevec_concave_2006,
title = {Concave Surround Optics for Rapid Multi-View Imaging},
author = {Paul Debevec and Mark Bolas and Ian McDowall},
url = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors that simulates the appearance of camera movement around a scene while the physical camera remains stationary. The system is thus amenable to capturing dynamic events, avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high-speed video of a dynamic scene, showing smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare it with alternative setups.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Inproceedings
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
@inproceedings{jones_simulating_2006,
title = {Simulating Spatially Varying Lighting on a Live Performance},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
address = {London, UK},
abstract = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
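The first-order approximation in this abstract amounts to a per-pixel intensity rescale driven by the pixel's recovered 3D position. A minimal sketch, with the illumination-volume callables as assumed names:

import numpy as np

def relight_first_order(basis_pixel: np.ndarray, position: np.ndarray,
                        target_illum, basis_illum) -> np.ndarray:
    """Rescale one pixel captured under a single LED basis light.

    basis_pixel: RGB under the basis light; position: the pixel's 3D point
    from structured light; target_illum / basis_illum: callables returning
    scalar illumination intensity at a 3D point (illustrative names).
    """
    scale = target_illum(position) / max(basis_illum(position), 1e-6)
    return basis_pixel * scale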
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Inproceedings
In: Eurographics Symposium on Rendering (2006), 2006.
@inproceedings{einarsson_relighting_2006,
title = {Relighting Human Locomotion with Flowed Reflectance Fields},
author = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
url = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Eurographics Symposium on Rendering (2006)},
abstract = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface, and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
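The image-based relighting step used here follows the standard linear model: the relit frame is a weighted sum of the basis images, with weights sampled from the target lighting environment in the basis directions. A minimal sketch, with array shapes as assumptions:

import numpy as np

def relight(basis_images: np.ndarray, light_weights: np.ndarray) -> np.ndarray:
    """basis_images: (n_lights, H, W, 3); light_weights: (n_lights,)."""
    return np.tensordot(light_weights, basis_images, axes=1)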
Bolas, Mark; Pair, Jarrell; Haynes, Kip; McDowall, Ian
Display Research at the University of Southern California Inproceedings
In: IEEE Emerging Displays Workshop, Alexandria, VA, 2006.
@inproceedings{bolas_display_2006,
title = {Display Research at the University of Southern California},
author = {Mark Bolas and Jarrell Pair and Kip Haynes and Ian McDowall},
url = {http://ict.usc.edu/pubs/Display%20Research%20at%20the%20University%20of%20Southern%20California.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {IEEE Emerging Displays Workshop},
address = {Alexandria, VA},
abstract = {The University of Southern California and its collaborative research partner, Fakespace Labs, are participating in a number of research programs to invent and implement new forms of display technologies for immersive and semi-immersive applications. This paper briefly describes three of these technologies and highlights a few emerging results from those efforts. The first system is a rear projected 300 degree field of view cylindrical display. It is driven by 11 projectors with geometry correction and edge blending hardware. A full scale prototype will be completed in March 2006. The second system is a 14 screen projected panoramic room environment used as an advanced teaching and meeting space. It can be driven by a cluster of personal computers or low-cost DVD players, or driven by a single personal computer. The third is a prototype stereoscopic head mounted display that can be worn in a fashion similar to standard dust protection goggles. It provides a field of view in excess of 150 degrees.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Inproceedings
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
@inproceedings{jones_performance_2005,
title = {Performance Geometry Capture for Spatially Varying Relighting},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
year = {2005},
date = {2005-08-01},
booktitle = {SIGGRAPH 2005 Sketch},
address = {Los Angeles, CA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}