Publications
Suma, Evan; Lipps, Zachary; Finkelstein, Samantha; Krum, David M.; Bolas, Mark
Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 18, no. 4, pp. 555–564, 2012.
@article{suma_impossible_2012,
title = {Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture},
author = {Evan Suma and Zachary Lipps and Samantha Finkelstein and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Impossible%20Spaces-%20Maximizing%20Natural%20Walking%20in%20Virtual%20Environments%20with%20Self-Overlapping%20Architecture.pdf},
year = {2012},
date = {2012-04-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {18},
number = {4},
pages = {555–564},
abstract = {Walking is only possible within immersive virtual environments that fit inside the boundaries of the user’s physical workspace. To reduce the severity of the restrictions imposed by limited physical area, we introduce “impossible spaces,” a new design mechanic for virtual environments that wish to maximize the size of the virtual environment that can be explored with natural locomotion. Such environments make use of self-overlapping architectural layouts, effectively compressing comparatively large interior environments into smaller physical areas. We conducted two formal user studies to explore the perception and experience of impossible spaces. In the first experiment, we showed that reasonably small virtual rooms may overlap by as much as 56% before users begin to detect that they are in an impossible space, and that the larger virtual rooms that expanded to maximally fill our available 9.14m x 9.14m workspace may overlap by up to 31%. Our results also demonstrate that users perceive distances to objects in adjacent overlapping rooms as if the overall space was uncompressed, even at overlap levels that were overtly noticeable. In our second experiment, we combined several well-known redirection techniques to string together a chain of impossible spaces in an expansive outdoor scene. We then conducted an exploratory analysis of users’ verbal feedback during exploration, which indicated that impossible spaces provide an even more powerful illusion when users are naive to the manipulation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
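The overlap figures reported in this abstract are easy to reason about geometrically. As a minimal sketch (not the authors' code), the Python snippet below computes how much one axis-aligned rectangular room overlaps an adjacent one; the room coordinates are hypothetical.

# Minimal sketch (not the paper's implementation): overlap between two
# axis-aligned rectangular rooms, as a percentage of the second room's area.
def overlap_percent(room_a, room_b):
    """Each room is (x_min, y_min, x_max, y_max) in meters."""
    ax0, ay0, ax1, ay1 = room_a
    bx0, by0, bx1, by1 = room_b
    w = max(0.0, min(ax1, bx1) - max(ax0, bx0))  # width of the intersection
    h = max(0.0, min(ay1, by1) - max(ay0, by0))  # height of the intersection
    return 100.0 * (w * h) / ((bx1 - bx0) * (by1 - by0))

# Hypothetical layout: two 4 m x 4 m rooms sharing half their footprint.
print(overlap_percent((0, 0, 4, 4), (2, 0, 6, 4)))  # -> 50.0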
Hoberman, Perry; Krum, David M.; Suma, Evan; Bolas, Mark
Immersive Training Games for Smartphone-Based Head Mounted Displays Proceedings Article
In: IEEE Virtual Reality, pp. 151–152, 2012.
@inproceedings{hoberman_immersive_2012,
title = {Immersive Training Games for Smartphone-Based Head Mounted Displays},
author = {Perry Hoberman and David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Immersive%20Training%20Games%20for%20Smartphone-Based%20Head%20Mounted%20Displays.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE Virtual Reality},
pages = {151–152},
abstract = {Thin computing clients, such as smartphones and tablets, have exhibited recent growth in display resolutions, processing power, and graphical rendering speeds. In this poster, we show how we leveraged these trends to create virtual reality (VR) training games which run entirely on a commodity mobile computing platform. This platform consists of a commercial off-the-shelf game engine, commodity smartphones, and mass produced optics. The games utilize the strengths of this platform to provide immersive features like 360 degree photo panoramas and interactive 3D virtual scenes. By sharing information about building such applications, we hope to enable others to develop new types of mobile VR applications. In particular, we feel this system is ideally suited for casual “pick up and use” VR applications for collaborative classroom learning, design reviews, and other multi-user immersive experiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Singh, Gurjot; Swan II, J. Edward; Jones, J. Adam; Ellis, Stephen R.
Depth Judgments by Reaching and Matching in Near-Field Augmented Reality Proceedings Article
In: IEEE Virtual Reality, pp. 165–166, Orange County, CA, 2012.
@inproceedings{singh_depth_2012,
title = {Depth Judgments by Reaching and Matching in Near-Field Augmented Reality},
author = {Gurjot Singh and J. Edward Swan II and J. Adam Jones and Stephen R. Ellis},
url = {http://ict.usc.edu/pubs/Depth%20Judgments%20by%20Reaching%20and%20Matching%20in%20Near-Field%20Augmented%20Reality.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE Virtual Reality},
pages = {165–166},
address = {Orange County, CA},
abstract = {In this abstract we describe an experiment that measured depth judgments in optical see-through augmented reality (AR) at near-field reaching distances of 24 to 56 cm. The 2×2 experiment crossed two depth judgment tasks, perceptual matching and blind reaching, with two different environments, a real-world environment and an augmented reality environment. We designed a task that used a direct reaching gesture at constant percentages of each participant's maximum reach; our task was inspired by previous work by Tresilian and Mon-Williams [6] that found very accurate blind reaching results in a real-world environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan; Bruder, Gerd; Steinicke, Frank; Krum, David M.; Bolas, Mark
A Taxonomy for Deploying Redirection Techniques in Immersive Virtual Environments Proceedings Article
In: IEEE Virtual Reality, Orange County, CA, 2012.
@inproceedings{suma_taxonomy_2012,
title = {A Taxonomy for Deploying Redirection Techniques in Immersive Virtual Environments},
author = {Evan Suma and Gerd Bruder and Frank Steinicke and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Taxonomy%20for%20Deploying%20Redirection%20Techniques%20in%20Immersive%20Virtual%20Environments.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE Virtual Reality},
address = {Orange County, CA},
abstract = {Natural walking can provide a compelling experience in immersive virtual environments, but it remains an implementation challenge due to the physical space constraints imposed on the size of the virtual world. The use of redirection techniques is a promising approach that relaxes the space requirements of natural walking by manipulating the user’s route in the virtual environment, causing the real world path to remain within the boundaries of the physical workspace. In this paper, we present and apply a novel taxonomy that separates redirection techniques according to their geometric flexibility versus the likelihood that they will be noticed by users. Additionally, we conducted a user study of three reorientation techniques, which confirmed that participants were less likely to experience a break in presence when reoriented using the techniques classified as subtle in our taxonomy. Our results also suggest that reorientation with change blindness illusions may give the impression of exploring a more expansive environment than continuous rotation techniques, but at the cost of negatively impacting spatial knowledge acquisition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Burba, Nathan; Bolas, Mark; Krum, David M.; Suma, Evan
Unobtrusive Measurement of Subtle Nonverbal Behaviors with the Microsoft Kinect Proceedings Article
In: IEEE VR Workshop on Ambient Information Technologies, pp. 10–13, Orange County, CA, 2012.
@inproceedings{burba_unobtrusive_2012,
title = {Unobtrusive Measurement of Subtle Nonverbal Behaviors with the Microsoft Kinect},
author = {Nathan Burba and Mark Bolas and David M. Krum and Evan Suma},
url = {http://ict.usc.edu/pubs/Unobtrusive%20Measurement%20of%20Subtle%20Nonverbal%20Behaviors%20with%20the%20Microsoft%20Kinect.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE VR Workshop on Ambient Information Technologies},
pages = {10–13},
address = {Orange County, CA},
abstract = {We describe two approaches for unobtrusively sensing subtle nonverbal behaviors using a consumer-level depth sensing camera. The first signal, respiratory rate, is estimated by measuring the visual expansion and contraction of the user's chest cavity during inhalation and exhalation. Additionally, we detect a specific type of fidgeting behavior, known as "leg jiggling," by measuring high-frequency vertical oscillations of the user's knees. Both of these techniques rely on the combination of skeletal tracking information with raw depth readings from the sensor to identify the cyclical patterns in jittery, low-resolution data. Such subtle nonverbal signals may be useful for informing models of users' psychological states during communication with virtual human agents, thereby improving interactions that address important societal challenges in domains including education, training, and medicine.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
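Breathing appears in the depth data as a slow periodic oscillation in mean chest depth, so the rate can be read off the dominant frequency. The following is a hedged sketch of that signal-processing step, not the paper's implementation; the sampling rate and band limits are assumptions.

# Sketch of the general idea (not the paper's code): estimate respiratory
# rate from a time series of mean chest depth sampled by a depth camera.
import numpy as np

def breaths_per_minute(chest_depth, fs):
    """chest_depth: 1-D array of depth samples in meters; fs: sample rate in Hz."""
    x = chest_depth - np.mean(chest_depth)          # remove the static offset
    spectrum = np.abs(np.fft.rfft(x))
    freqs = np.fft.rfftfreq(len(x), d=1.0 / fs)
    band = (freqs >= 0.1) & (freqs <= 0.7)          # assumed breathing band (6-42 bpm)
    return 60.0 * freqs[band][np.argmax(spectrum[band])]

# Synthetic example: 0.25 Hz breathing (15 bpm) plus sensor noise, 30 Hz frames.
t = np.arange(0, 30, 1.0 / 30.0)
depth = 2.0 + 0.005 * np.sin(2 * np.pi * 0.25 * t) + 0.001 * np.random.randn(len(t))
print(breaths_per_minute(depth, fs=30.0))           # approximately 15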
Hoberman, Perry; Sacher, Andrew; Turpin, David; Gotsis, Marientina; Bolas, Mark; Varma, Rohit
Using the Phantogram Technique for a Collaborative Stereoscopic Multitouch Tabletop Game Proceedings Article
In: International Conference on Creating, Connecting and Collaborating through Computing, 2012.
@inproceedings{hoberman_using_2012,
title = {Using the Phantogram Technique for a Collaborative Stereoscopic Multitouch Tabletop Game},
author = {Perry Hoberman and Andrew Sacher and David Turpin and Marientina Gotsis and Mark Bolas and Rohit Varma},
url = {http://ict.usc.edu/pubs/Using%20the%20Phantogram%20Technique%20for%20a%20Collaborative%20Stereoscopic%20Multitouch%20Tabletop%20Game.pdf},
year = {2012},
date = {2012-01-01},
booktitle = {International Conference on Creating, Connecting and Collaborating through Computing},
abstract = {This paper outlines the design of a stereoscopic project utilizing the 3D phantogram technique (anamorphically distorted projection onto a horizontal surface) to implement a two-person multitouch game using the Unity 3D engine and IZ3D drivers on the Microsoft Surface tabletop display. The purpose of the project was to develop an engaging platform for the communication of basic concepts about vision and perception for a target audience of children under six and their families in a research clinic setting. Viewed from an appropriate height and position, virtual objects and characters appear to stand directly on the tabletop, facilitating a direct and intuitive mixed reality interface. The technical challenges included occlusion of stereoscopic images by users' hands, the generation of appropriate perspectives for multiple users, and the integration of a two-dimensional multitouch surface with a three-dimensional stereoscopic display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
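The phantogram projection itself reduces to intersecting the eye-to-point ray with the table plane. Below is an illustrative sketch of that single step under assumed coordinates (tabletop at z = 0, z up), not the project's Unity code; a stereoscopic phantogram repeats it once per eye.

# Illustrative sketch: a virtual point above the tabletop is drawn where the
# line from the viewer's eye through the point meets the table plane (z = 0).
def table_projection(eye, point):
    """eye, point: (x, y, z) in meters, tabletop at z = 0, z up."""
    ex, ey, ez = eye
    px, py, pz = point
    t = ez / (ez - pz)   # ray parameter where the eye->point line hits z = 0
    return (ex + t * (px - ex), ey + t * (py - ey))

# Hypothetical viewer 0.6 m above the table looking at a 0.1 m tall object:
# its top is drawn displaced away from the viewer, so the object appears to
# stand upright on the surface.
print(table_projection(eye=(0.0, -0.5, 0.6), point=(0.0, 0.2, 0.1)))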
Krum, David M.; Suma, Evan; Bolas, Mark
Augmented Reality using Personal Projection and Retroreflection Journal Article
In: Personal and Ubiquitous Computing, vol. 16, no. 1, pp. 17–26, 2012.
@article{krum_augmented_2012,
title = {Augmented Reality using Personal Projection and Retroreflection},
author = {David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Augmented%20Reality%20using%20Personal%20Projection%20and%20Retroflection.pdf},
year = {2012},
date = {2012-01-01},
journal = {Personal and Ubiquitous Computing},
volume = {16},
number = {1},
pages = {17–26},
abstract = {The support of realistic and flexible training simulations for military, law enforcement, emergency response, and other domains has been an important motivator for the development of augmented reality technology. An important vision for achieving this goal has been the creation of a versatile "stage" for physical, emotional, and cognitive training that combines virtual characters and environments with real world elements, such as furniture and props. This paper presents REFLCT, a mixed reality projection framework that couples a near-axis personal projector design with tracking and novel retroreflective props and surfaces. REFLCT provides multiple users with personalized, perspective correct imagery that is uniquely composited for each user directly into and onto a surrounding environment, without any optics positioned in front of the user’s eyes or face. These characteristics facilitate team training experiences which allow users to easily interact with their teammates while wearing their standard issue gear. REFLCT can present virtual humans who can make deictic gestures and establish eye contact without the geometric ambiguity of a typical projection display. It can also display perspective correct scenes that require a realistic approach for detecting and communicating potential threats between multiple users in disparate locations. In addition to training applications, this display system appears to be well-matched with other user interface and application domains, such as asymmetric collaborative workspaces and personal information guides.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jones, Andrew; Fyffe, Graham; Yu, Xueming; Ma, Wan-Chun; Busch, Jay; Ichikari, Ryosuke; Bolas, Mark; Debevec, Paul
Head-mounted Photometric Stereo for Performance Capture Proceedings Article
In: 8th European Conference on Visual Media Production (CVMP 2011), London, UK, 2011.
@inproceedings{jones_head-mounted_2011,
title = {Head-mounted Photometric Stereo for Performance Capture},
author = {Andrew Jones and Graham Fyffe and Xueming Yu and Wan-Chun Ma and Jay Busch and Ryosuke Ichikari and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Head-mounted%20Photometric%20Stereo%20for%20Performance%20Capture.pdf},
year = {2011},
date = {2011-11-01},
booktitle = {8th European Conference on Visual Media Production (CVMP 2011)},
address = {London, UK},
abstract = {Head-mounted cameras are an increasingly important tool for capturing facial performances to drive virtual characters. They provide a fixed, unoccluded view of the face, useful for observing motion capture dots or as input to video analysis. However, the 2D imagery captured with these systems is typically affected by ambient light and generally fails to record subtle 3D shape changes as the face performs. We have developed a system that augments a head-mounted camera with LED-based photometric stereo. The system allows observation of the face independent of the ambient light and generates per-pixel surface normals so that the performance is recorded dynamically in 3D. The resulting data can be used for facial relighting or as better input to machine learning algorithms for driving an animated face.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
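The per-pixel math this rig relies on is classic Lambertian photometric stereo: with three known light directions, each pixel's intensities form a 3x3 linear system whose solution is the albedo-scaled normal. A textbook sketch follows; the LED directions are hypothetical and the paper's calibration is not reproduced here.

# Textbook Lambertian photometric stereo (illustration only): three known
# LED directions and three intensities per pixel yield the surface normal.
import numpy as np

L = np.array([[0.0, 0.0, 1.0],     # rows: unit lighting directions of the LEDs
              [0.7, 0.0, 0.714],   # (hypothetical values for illustration)
              [0.0, 0.7, 0.714]])

def normal_from_intensities(i1, i2, i3):
    g = np.linalg.solve(L, np.array([i1, i2, i3]))  # g = albedo * normal
    albedo = np.linalg.norm(g)
    return g / albedo, albedo

n, rho = normal_from_intensities(0.9, 0.75, 0.75)
print(n, rho)   # unit surface normal and per-pixel albedo estimate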
Rizzo, Albert; Lange, Belinda; Suma, Evan; Bolas, Mark
Virtual Reality and Interactive Digital Game Technology: New Tools to Address Obesity and Diabetes Journal Article
In: Journal of Diabetes Science and Technology, vol. 5, no. 2, pp. 256–264, 2011.
@article{rizzo_virtual_2011,
title = {Virtual Reality and Interactive Digital Game Technology: New Tools to Address Obesity and Diabetes},
author = {Albert Rizzo and Belinda Lange and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20and%20Interactive%20Digital%20Game%20Technology-%20New%20Tools%20to%20Address%20Obesity%20and%20Diabetes.pdf},
year = {2011},
date = {2011-03-01},
journal = {Journal of Diabetes Science and Technology},
volume = {5},
number = {2},
pages = {256–264},
abstract = {The convergence of the exponential advances in virtual reality (VR)-enabling technologies with a growing body of clinical research and experience has fueled the evolution of the discipline of clinical VR. This article begins with a brief overview of methods for producing and delivering VR environments that can be accessed by users for a range of clinical health conditions. Interactive digital games and new forms of natural movement-based interface devices are also discussed in the context of the emerging area of exergaming, along with some of the early results from studies of energy expenditure during the use of these systems. While these results suggest that playing currently available active exergames uses significantly more energy than sedentary activities and is equivalent to a brisk walk, these activities do not reach the level of intensity that would match playing the actual sport, nor do they deliver the recommended daily amount of exercise for children. However, these results provide some support for the use of digital exergames using the current state of technology as a complement to, rather than a replacement for, regular exercise. This may change in the future as new advances in novel full-body interaction systems for providing vigorous interaction with digital games are expected to drive the creation of engaging, low-cost interactive game-based applications designed to increase exercise participation in persons at risk for obesity.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Suma, Evan; Lange, Belinda; Rizzo, Albert; Krum, David M.; Bolas, Mark
FAAST: The Flexible Action and Articulated Skeleton Toolkit Proceedings Article
In: IEEE Virtual Reality, pp. 245–246, Singapore, 2011.
@inproceedings{suma_faast_2011,
title = {FAAST: The Flexible Action and Articulated Skeleton Toolkit},
author = {Evan Suma and Belinda Lange and Albert Rizzo and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/FAAST-%20The%20Flexible%20Action%20and%20Articulated%20Skeleton%20Toolkit.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Virtual Reality},
pages = {245–246},
address = {Singapore},
abstract = {The Flexible Action and Articulated Skeleton Toolkit (FAAST) is middleware to facilitate integration of full-body control with virtual reality applications and video games using OpenNI-compliant depth sensors (currently the PrimeSensor and the Microsoft Kinect). FAAST incorporates a VRPN server for streaming the user's skeleton joints over a network, which provides a convenient interface for custom virtual reality applications and games. This body pose information can be used for goals such as realistically puppeting a virtual avatar or controlling an on-screen mouse cursor. Additionally, the toolkit also provides a configurable input emulator that detects human actions and binds them to virtual mouse and keyboard commands, which are sent to the actively selected window. Thus, FAAST can enable natural interaction for existing off-the-shelf video games that were not explicitly developed to support input from motion sensors. The actions and input bindings are configurable at run-time, allowing the user to customize the controls and sensitivity to adjust for individual body types and preferences. In the future, we plan to substantially expand FAAST’s action lexicon, provide support for recording and training custom gestures, and incorporate real-time head tracking using computer vision techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
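The input emulator described here boils down to evaluating pose predicates over the streamed skeleton each frame and injecting synthetic input when one fires. The sketch below shows that pattern in the abstract; the joint names, threshold, and press_key() stub are hypothetical stand-ins, not FAAST's actual configuration syntax or API.

# Hedged sketch of an action-to-input binding loop. Everything here is a
# hypothetical stand-in for illustration, not FAAST's API.
def lean_left(joints, threshold=0.15):
    """True when the torso is displaced left of the hips by > threshold meters."""
    return joints["torso"][0] < joints["hip_center"][0] - threshold

def press_key(key):
    print(f"emulated key press: {key}")  # a real emulator would inject OS input

BINDINGS = [(lean_left, "a")]            # action predicate -> emulated key

def process_frame(joints):
    for predicate, key in BINDINGS:
        if predicate(joints):
            press_key(key)

# One tracked frame: joint name -> (x, y, z) in meters, x positive to the right.
process_frame({"torso": (-0.2, 1.0, 2.0), "hip_center": (0.0, 0.9, 2.0)})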
Suma, Evan; Krum, David M.; Bolas, Mark
Sharing Space in Mixed and Virtual Reality Environments Using a Low-Cost Depth Sensor Proceedings Article
In: IEEE International Symposium on Virtual Reality Innovations, Singapore, 2011.
@inproceedings{suma_sharing_2011,
title = {Sharing Space in Mixed and Virtual Reality Environments Using a Low-Cost Depth Sensor},
author = {Evan Suma and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Sharing%20Space%20in%20Mixed%20and%20Virtual%20Reality%20Environments%20Using%20a%20Low-Cost%20Depth%20Sensor.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE International Symposium on Virtual Reality Innovations},
address = {Singapore},
abstract = {We describe an approach for enabling people to share virtual space with a user that is fully immersed in a head-mounted display. By mounting a recently developed low-cost depth sensor to the user's head, depth maps can be generated in real-time based on the user's gaze direction, allowing us to create mixed reality experiences by merging real people and objects into the virtual environment. This enables verbal and nonverbal communication between users that would normally be isolated from one another. We present the implementation of the technique, then discuss the advantages and limitations of using commercially available depth sensing technology in immersive virtual reality applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
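Merging the sensed geometry into the virtual world hinges on unprojecting each depth pixel through the camera intrinsics and transforming it by the tracked head pose. The sketch below shows that step with assumed pinhole intrinsics and a made-up pose; it illustrates the general approach rather than the paper's code.

# Assumed-detail sketch: depth pixel -> camera-space point -> world space.
import numpy as np

def unproject(u, v, depth, fx, fy, cx, cy):
    """Pixel (u, v) with depth in meters -> 3-D point in camera coordinates."""
    return np.array([(u - cx) * depth / fx, (v - cy) * depth / fy, depth])

def to_world(p_cam, head_rotation, head_position):
    """Apply the head-mounted sensor's tracked pose (3x3 R, 3-vector t)."""
    return head_rotation @ p_cam + head_position

fx = fy = 525.0                      # hypothetical VGA depth-camera intrinsics
cx, cy = 320.0, 240.0
p = unproject(400, 240, 1.5, fx, fy, cx, cy)
print(to_world(p, np.eye(3), np.array([0.0, 1.7, 0.0])))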
Suma, Evan; Clark, Seth; Finkelstein, Samantha; Wartell, Zachary; Krum, David M.; Bolas, Mark
Leveraging Change Blindness for Redirection in Virtual Environments Proceedings Article
In: IEEE Virtual Reality, pp. 159–166, 2011.
@inproceedings{suma_leveraging_2011,
title = {Leveraging Change Blindness for Redirection in Virtual Environments},
author = {Evan Suma and Seth Clark and Samantha Finkelstein and Zachary Wartell and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Leveraging%20Change%20Blindness%20for%20Redirection%20in%20Virtual%20Environments.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Virtual Reality},
pages = {159–166},
abstract = {We present change blindness redirection, a novel technique for allowing the user to walk through an immersive virtual environment that is considerably larger than the available physical workspace. This approach, based on a dynamic environment model, improves on previous redirection techniques, as it does not introduce any visual-vestibular conflicts from manipulating the mapping between physical and virtual motions, nor does it require breaking presence to stop and explicitly reorient the user. We conducted two user studies to evaluate the effectiveness of the change blindness illusion when exploring a virtual environment that was an order of magnitude larger than the physical walking space. Despite the dynamically changing environment, participants were able to draw coherent sketch maps of the environment structure, and pointing task results indicated that they were able to maintain their spatial orientation within the virtual world. Only one out of 77 participants across both studies definitively noticed that a scene change had occurred, suggesting that change blindness redirection provides a remarkably compelling illusion. Secondary findings revealed that a wide field-of-view increases pointing accuracy and that experienced gamers reported greater sense of presence than those with little or no experience with 3D video games.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
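Change blindness redirection rests on one precondition: scene geometry may only be swapped while it is outside the user's field of view. Here is a toy 2-D version of that check, an assumed detail for illustration rather than the study's implementation.

# Toy precondition check: is a target safely outside the user's view cone?
import math

def outside_fov(user_pos, user_heading_deg, target_pos, fov_deg=60.0):
    dx = target_pos[0] - user_pos[0]
    dz = target_pos[1] - user_pos[1]
    bearing = math.degrees(math.atan2(dx, dz))            # angle to the target
    rel = (bearing - user_heading_deg + 180.0) % 360.0 - 180.0
    return abs(rel) > fov_deg / 2.0

# A doorway directly behind the user can be relocated without being seen.
print(outside_fov((0.0, 0.0), 0.0, (0.0, -3.0)))          # -> True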
Suma, Evan; Krum, David M.; Finkelstein, Samantha; Bolas, Mark
Effects of Redirection on Spatial Orientation in Real and Virtual Environments Proceedings Article
In: IEEE Symposium on 3D User Interfaces, pp. 35–38, Singapore, 2011.
@inproceedings{suma_effects_2011,
title = {Effects of Redirection on Spatial Orientation in Real and Virtual Environments},
author = {Evan Suma and David M. Krum and Samantha Finkelstein and Mark Bolas},
url = {http://ict.usc.edu/pubs/Effects%20of%20Redirection%20on%20Spatial%20Orientation%20in%20Real%20and%20Virtual%20Environments.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Symposium on 3D User Interfaces},
pages = {35–38},
address = {Singapore},
abstract = {We report a user study that investigated the effect of redirection in an immersive virtual environment on spatial orientation relative to both real world and virtual stimuli. Participants performed a series of spatial pointing tasks with real and virtual targets, during which they experienced three within-subjects conditions: rotation-based redirection, change blindness redirection, and no redirection. Our results indicate that when using the rotation technique, participants spatially updated both their virtual and real world orientations during redirection, resulting in pointing accuracy to the targets' recomputed positions that was strikingly similar to the control condition. While our data also suggest that a similar spatial updating may have occurred when using a change blindness technique, the realignment of targets appeared to be more complicated than a simple rotation, and was thus difficult to measure quantitatively.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
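Rotation-based redirection, one of the techniques compared here, scales the user's real head rotation by a gain before applying it to the virtual camera, steering the real-world path while the virtual path feels unchanged. A toy numeric illustration follows; the gain value is arbitrary rather than the study's parameter.

# Toy illustration of a rotation gain (not the study's implementation).
def redirect_rotation(real_delta_deg, gain=1.3):
    """Virtual rotation applied for a given real head rotation."""
    return gain * real_delta_deg

virtual_heading = 0.0
for real_step in [10.0] * 9:       # the user turns 90 degrees in the real room
    virtual_heading += redirect_rotation(real_step)
print(virtual_heading)             # 117 degrees of virtual turn at gain 1.3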
Olson, Logan; Krum, David M.; Suma, Evan; Bolas, Mark
A Design for a Smartphone-Based Head Mounted Display Proceedings Article
In: IEEE Virtual Reality, pp. 233–234, Singapore, 2011.
@inproceedings{olson_design_2011,
title = {A Design for a Smartphone-Based Head Mounted Display},
author = {Logan Olson and David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Design%20for%20a%20Smartphone-Based%20Head%20Mounted%20Display.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Virtual Reality},
pages = {233–234},
address = {Singapore},
abstract = {Thin computing clients, such as smartphones and tablets, have experienced recent growth in display resolutions and graphics processing power. In this poster, we show how to leverage these trends to create an experimental wide field of view, 3D stereoscopic head mounted display (HMD), based on two high resolution smartphones. This HMD prototype is unique in that the graphics system is entirely onboard, allowing it to be lightweight, wireless, and convenient to use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Babu, Sabarish V.; Suma, Evan; Hodges, Larry F.; Barnes, Tiffany
Learning Cultural Conversation Protocols with Immersive Interactive Virtual Humans Journal Article
In: International Journal of Artificial Intelligence, vol. 10, no. 4, pp. 25–35, 2011.
@article{babul_learning_2011,
title = {Learning Cultural Conversation Protocols with Immersive Interactive Virtual Humans},
author = {Sabarish V. Babu and Evan Suma and Larry F. Hodges and Tiffany Barnes},
url = {http://ict.usc.edu/pubs/Learning%20Cultural%20Conversation%20Protocols%20with%20Immersive%20Interactive%20Virtual%20Humans.pdf},
year = {2011},
date = {2011-01-01},
journal = {International Journal of Artificial Intelligence},
volume = {10},
number = {4},
pages = {25–35},
abstract = {This paper reports on a study conducted to investigate the effects of using immersive virtual humans in natural multi-modal interaction to teach users cultural conversational verbal and non-verbal protocols in south Indian culture. The study was conducted using a between-subjects experimental design. We compared instruction and interactive feedback from immersive virtual humans against instruction based on a written study guide with illustrations of the cultural protocols. Participants were then tested on how well they learned the cultural conversational protocols by exercising the cultural conventions in front of videos of real people. Subjective evaluations of participants' performance were conducted by three south Indian reviewers who were blind to the condition to which the participants were assigned. Objective evaluations of participants' performance were conducted on the motion tracking log data recorded during the testing session. We also measured the participants' positive and negative affect before and after training in both conditions, as well as the effect of co-presence with the life-size virtual south Indians. The results of our subjective evaluation suggest that participants who trained with the virtual humans performed significantly better than the participants who studied from literature. The results also revealed that there were no significant differences in positive or negative affect between conditions. However, overall for all participants in both conditions, positive affect increased and negative affect decreased from before to after instruction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lange, Belinda; Rizzo, Albert; Chang, Chien-Yen; Suma, Evan; Bolas, Mark
Markerless Full Body Tracking: Depth-Sensing Technology within Virtual Environments Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2011.
@inproceedings{lange_markerless_2011,
title = {Markerless Full Body Tracking: Depth-Sensing Technology within Virtual Environments},
author = {Belinda Lange and Albert Rizzo and Chien-Yen Chang and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Markerless%20Full%20Body%20Tracking-%20Depth-Sensing%20Technology%20within%20Virtual%20Environments.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {Over the last decade there has been growing recognition of the potential value of virtual reality and game technology for creating a new generation of tools for advancing rehabilitation, training and exercise activities. However, until recently the only way people could interact with digital games and virtual reality simulations was by using relatively constrained gamepad, joystick and keyboard interface devices. Thus, rather than promoting physical activity, these modes of interaction encourage a more sedentary approach to playing games, typically while seated on the couch or in front of a desk. More complex and expensive motion tracking systems enable immersive interactions but are only available at restricted locations and are not readily available in the home setting. Recent advances in video game technology have fueled a proliferation of low-cost devices that can sense the user's motion. This paper will present and discuss three potential applications of the new depth-sensing camera technology from PrimeSense and Microsoft Kinect. The paper will outline the technology underlying the sensor, the development of our open source middleware allowing developers to make applications, and provide examples of applications that enhance interaction within virtual environments and game-based training/rehabilitation tools. The PrimeSense or Kinect sensors, along with open source middleware, provide markerless full-body tracking on a conventional PC using a single plug and play USB sensor. This technology provides a fully articulated skeleton that digitizes the user's body pose and directly quantizes their movements in real time without encumbering the user with tracking devices or markers. We have explored the integration of the depth sensing technology and middleware within three applications: 1) virtual environments, 2) gesture-controlled PC games, 3) a game developed to target specific movements for rehabilitation. The benefits of implementing this technology in these three areas demonstrate the potential to provide needed applications for modern-day warfighters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jurik, Joel; Jones, Andrew; Bolas, Mark; Debevec, Paul
Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array Proceedings Article
In: IEEE International Workshop on Projector–Camera Systems (PROCAMS), Colorado Springs, CO, 2011.
@inproceedings{jurik_prototyping_2011,
title = {Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array},
author = {Joel Jurik and Andrew Jones and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Prototyping%20a%20Light%20Field%20Display%20Involving%20Direct%20Observation%20of%20a%20Video%20Projector%20Array.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {IEEE International Workshop on Projector–Camera Systems (PROCAMS)},
address = {Colorado Springs, CO},
abstract = {We present a concept for a full-parallax light field display achieved by having users look directly into an array of video projectors. Each projector acts as one angularly varying pixel, so the display's spatial resolution depends on the number of video projectors and the angular resolution depends on the pixel resolution of any one video projector. We prototype a horizontal-parallax-only arrangement by mechanically moving a single pico-projector to an array of positions, and use long-exposure photography to simulate video of a horizontal array. With this setup, we determine the minimal projector density required to produce a continuous image, and describe practical ways to achieve such density and to realize the resulting system. We finally show that if today's pico-projectors become sufficiently inexpensive, immersive full-parallax displays with arbitrarily high spatial and angular resolution will become possible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
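The projector-density question is back-of-the-envelope geometry: adjacent projectors spaced s apart subtend roughly s/d radians at viewing distance d, and a continuous image needs that angular pitch to stay below some threshold. The numbers in this sketch are illustrative, not the paper's measured values.

# Back-of-the-envelope projector count for a horizontal-parallax array.
import math

def projectors_needed(width_m, distance_m, max_pitch_deg):
    spacing = distance_m * math.radians(max_pitch_deg)  # max gap between lenses
    return math.ceil(width_m / spacing), spacing

count, spacing = projectors_needed(width_m=1.0, distance_m=1.0, max_pitch_deg=0.36)
print(count, spacing)   # ~160 projectors at ~6.3 mm spacing for these assumptions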
Lange, Belinda; Suma, Evan; Newman, Brad; Phan, Thai; Chang, Chien-Yen; Rizzo, Albert; Bolas, Mark
Leveraging Unencumbered Full Body Control of Animated Virtual Characters for Game-Based Rehabilitation Proceedings Article
In: HCI International, 2011.
@inproceedings{lange_leveraging_2011,
title = {Leveraging Unencumbered Full Body Control of Animated Virtual Characters for Game-Based Rehabilitation},
author = {Belinda Lange and Evan Suma and Brad Newman and Thai Phan and Chien-Yen Chang and Albert Rizzo and Mark Bolas},
url = {http://ict.usc.edu/pubs/Leveraging%20Unencumbered%20Full%20Body%20Control%20of%20Animated%20Virtual%20Characters%20for%20Game-Based%20Rehabilitation.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {HCI International},
abstract = {The use of commercial video games as rehabilitation tools, such as the Nintendo Wii Fit, has recently gained much interest in the physical therapy arena. However, physical rehabilitation requires accurate and appropriate tracking and feedback of performance, often not provided by existing commercial console devices or games. This paper describes the development of an application that leverages recent advances in commercial video game technology to provide full-body control of animated virtual characters with low cost markerless tracking. The aim of this research is to develop and evaluate an interactive game-based rehabilitation tool for balance training of adults with neurological injury. This paper outlines the development and evaluation of a game-based rehabilitation tool using the PrimeSense depth sensing technology, designed to elicit specific therapeutic motions when controlling a virtual avatar in pursuit of in-game goals. A sample of nine adults participated in the initial user testing, providing feedback on the hardware and software prototype.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sadek, Ramy; Krum, David M.; Bolas, Mark
Simulating Hearing Loss in Virtual Training Proceedings Article
In: Audio Engineering Society, San Francisco, CA, 2010.
@inproceedings{sadek_simulating_2010,
title = {Simulating Hearing Loss in Virtual Training},
author = {Ramy Sadek and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Simulating%20Hearing%20Loss%20in%20Virtual%20Training.pdf},
year = {2010},
date = {2010-11-01},
booktitle = {Audio Engineering Society},
address = {San Francisco, CA},
abstract = {Audio systems for virtual reality and augmented reality training environments commonly focus on high-quality audio reproduction. Yet many trainees may face real-world situations wherein hearing is compromised. In these cases, the hindrance caused by impaired or lost hearing is a significant stressor that may affect performance. Because this phenomenon is hard to simulate without actually causing hearing damage, trainees are largely unpracticed at operating with diminished hearing. To improve the match between training scenarios and real-world situations, this effort aims to add simulated hearing loss or impairment as a training variable. The goal is to affect everything users hear, including non-simulated sounds such as their own and each other's voices, without overt noticeability, risk to hearing, or requiring headphones.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
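One plausible way to realize the described effect in software, offered as a sketch rather than the authors' method, is to pair a broadband level drop with a low-pass filter, since hearing damage typically costs high frequencies first; the cutoff and attenuation values are assumptions.

# Hedged sketch: crude hearing-loss simulation by attenuation plus low-pass.
import numpy as np
from scipy.signal import butter, lfilter

def simulate_hearing_loss(audio, fs, cutoff_hz=1000.0, attenuation_db=20.0):
    b, a = butter(4, cutoff_hz, btype="low", fs=fs)  # 4th-order low-pass
    gain = 10.0 ** (-attenuation_db / 20.0)          # broadband level drop
    return gain * lfilter(b, a, audio)

fs = 16000
t = np.arange(fs) / fs
tone_mix = np.sin(2 * np.pi * 440 * t) + 0.5 * np.sin(2 * np.pi * 3000 * t)
impaired = simulate_hearing_loss(tone_mix, fs)       # 3 kHz component mostly gone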
Sadek, Ramy
Automatic Parallelism for Dataflow Graphs Proceedings Article
In: Audio Engineering Society, San Francisco, CA, 2010.
@inproceedings{sadek_automatic_2010,
title = {Automatic Parallelism for Dataflow Graphs},
author = {Ramy Sadek},
url = {http://ict.usc.edu/pubs/Automatic%20Parallelism%20for%20Dataflow%20Graphs.pdf},
year = {2010},
date = {2010-11-01},
booktitle = {Audio Engineering Society},
address = {San Francisco, CA},
abstract = {This paper presents a novel algorithm to automate high-level parallelization from graph-based data structures representing data flow. This automatic optimization yields large performance improvements for multi-core machines running host-based applications. Results of these advances are shown through their incorporation into the audio processing engine Application Rendering Immersive Audio (ARIA) presented at AES 117. Although the ARIA system is the target framework, the contributions presented in this paper are generic and therefore applicable in a variety of software such as Pure Data and Max/MSP, game audio engines, non-linear editors and related systems. Additionally, the parallel execution paths extracted are shown to give effectively optimal cache performance, yielding significant speedup for such host-based applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
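The heart of such high-level parallelization can be illustrated with level scheduling of a dataflow DAG: every node whose inputs are all satisfied joins the current level, and nodes within a level may run concurrently. This generic sketch shows the standard idea, not ARIA's algorithm.

# Level scheduling of a dataflow DAG (generic illustration, not ARIA's code).
from concurrent.futures import ThreadPoolExecutor

def level_schedule(nodes, edges):
    """nodes: iterable of node ids; edges: list of (producer, consumer) pairs."""
    indegree = {n: 0 for n in nodes}
    succs = {n: [] for n in nodes}
    for a, b in edges:
        succs[a].append(b)
        indegree[b] += 1
    levels, ready = [], [n for n in nodes if indegree[n] == 0]
    while ready:
        levels.append(ready)
        nxt = []
        for n in ready:
            for m in succs[n]:
                indegree[m] -= 1
                if indegree[m] == 0:
                    nxt.append(m)
        ready = nxt
    return levels

# Diamond-shaped audio graph: a source feeds two independent filters, then a mix.
levels = level_schedule("SABM", [("S", "A"), ("S", "B"), ("A", "M"), ("B", "M")])
with ThreadPoolExecutor() as pool:
    for level in levels:                   # the A and B filters run concurrently
        list(pool.map(lambda n: print("run", n), level))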