Publications
2017
Feng, Andrew; Rosenberg, Evan Suma; Shapiro, Ari
Just-in-time, viable, 3D avatars from scans Journal Article
In: Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents), vol. 28, no. 3-4, 2017.
@article{feng_just--time_2017,
title = {Just-in-time, viable, 3D avatars from scans},
author = {Andrew Feng and Evan Suma Rosenberg and Ari Shapiro},
url = {http://onlinelibrary.wiley.com/doi/10.1002/cav.1769/epdf},
doi = {10.1002/cav.1769},
year = {2017},
date = {2017-05-01},
journal = {Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents)},
volume = {28},
number = {3-4},
abstract = {We demonstrate a system that can generate a photorealistic, interactive 3-D character from a human subject that is capable of movement, emotion, speech, and gesture in less than 20 min without the need for 3-D artist intervention or specialized technical knowledge through a near automatic process. Our method uses mostly commodity or off-the-shelf hardware. We demonstrate the just-in-time use of generating such 3-D models for virtual and augmented reality, games, simulation, and communication. We anticipate that the inexpensive generation of such photorealistic models will be useful in many venues where just-in-time 3-D reconstruction of digital avatars that resemble particular human subjects is necessary.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Spicer, Ryan P.; Russell, Stephen M.; Rosenberg, Evan Suma
The mixed reality of things: emerging challenges for human-information interaction Inproceedings
In: Proceedings Volume 10207, Next-Generation Analyst V, SPIE, Anaheim, CA, 2017.
@inproceedings{spicer_mixed_2017,
title = {The mixed reality of things: emerging challenges for human-information interaction},
author = {Ryan P. Spicer and Stephen M. Russell and Evan Suma Rosenberg},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2268004},
doi = {10.1117/12.2268004},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings Volume 10207, Next-Generation Analyst V},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {Virtual and mixed reality technology has advanced tremendously over the past several years. This nascent medium has the potential to transform how people communicate over distance, train for unfamiliar tasks, operate in challenging environments, and how they visualize, interact, and make decisions based on complex data. At the same time, the marketplace has experienced a proliferation of network-connected devices and generalized sensors that are becoming increasingly accessible and ubiquitous. As the "Internet of Things" expands to encompass a predicted 50 billion connected devices by 2020, the volume and complexity of information generated in pervasive and virtualized environments will continue to grow exponentially. The convergence of these trends demands a theoretically grounded research agenda that can address emerging challenges for human-information interaction (HII). Virtual and mixed reality environments can provide controlled settings where HII phenomena can be observed and measured, new theories developed, and novel algorithms and interaction techniques evaluated. In this paper, we describe the intersection of pervasive computing with virtual and mixed reality, identify current research gaps and opportunities to advance the fundamental understanding of HII, and discuss implications for the design and development of cyber-human systems for both military and civilian use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Social Impact of Enhanced Gaze Presentation Using Head Mounted Projection Inproceedings
In: Proceedings of the Human-Computer Interaction International Conference, Springer International Publishing, Vancouver, Canada, 2017, ISBN: 978-3-319-58696-0 978-3-319-58697-7.
@inproceedings{krum_social_2017,
title = {Social Impact of Enhanced Gaze Presentation Using Head Mounted Projection},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
url = {https://link.springer.com/chapter/10.1007/978-3-319-58697-7_5},
isbn = {978-3-319-58696-0 978-3-319-58697-7},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Human-Computer Interaction International Conference},
publisher = {Springer International Publishing},
address = {Vancouver, Canada},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals in a group of observers. This could result in perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers in the room regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. In this paper, we discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen; Amir, Ori; Lin, Rebecca
Social influence of humor in virtual human counselor's self-disclosure Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 1546-4261.
@article{kang_social_2017,
title = {Social influence of humor in virtual human counselor's self-disclosure},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang and Ori Amir and Rebecca Lin},
url = {http://doi.wiley.com/10.1002/cav.1763},
doi = {10.1002/cav.1763},
issn = {1546-4261},
year = {2017},
date = {2017-04-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {We explored the social influence of humor in a virtual human counselor's self-disclosure while also varying the ethnicity of the virtual counselor. In a 2 × 3 experiment (humor and ethnicity of the virtual human counselor), participants experienced counseling interview interactions via Skype on a smartphone. We measured user responses to and perceptions of the virtual human counselor. The results demonstrate that humor positively affects user responses to and perceptions of a virtual counselor. The results further suggest that matching styles of humor with a virtual counselor's ethnicity influences user responses and perceptions. The results offer insight into the effective design and development of realistic and believable virtual human counselors. Furthermore, they illuminate the potential use of humor to enhance self-disclosure in human–agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Khooshabeh, Peter; Choromanski, Igor; Neubauer, Catherine; Krum, David M.; Spicer, Ryan; Campbell, Julia
Mixed Reality Training for Tank Platoon Leader Communication Skills Inproceedings
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 333–334, IEEE, Los Angeles, CA, 2017.
@inproceedings{khooshabeh_mixed_2017,
title = {Mixed Reality Training for Tank Platoon Leader Communication Skills},
author = {Peter Khooshabeh and Igor Choromanski and Catherine Neubauer and David M. Krum and Ryan Spicer and Julia Campbell},
url = {http://ieeexplore.ieee.org/document/7892312/#full-text-section},
doi = {10.1109/VR.2017.7892312},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {333--334},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Here we describe the design and usability evaluation of a mixed reality prototype to simulate the role of a tank platoon leader: an individual who is not only a tank commander, but also directs a platoon of three other tanks with their own respective tank commanders. The domain of tank commander training has relied on physical simulators of the actual Abrams tank that encapsulate the whole crew. The TALK-ON system we describe here focuses on training the communication skills of the leader in a simulated tank crew. We report results from a usability evaluation and discuss how they will inform our future work for collective tank training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ard, Tyler; Krum, David M.; Phan, Thai; Duncan, Dominique; Essex, Ryan; Bolas, Mark; Toga, Arthur
NIVR: Neuro Imaging in Virtual Reality Inproceedings
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 465–466, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{ard_nivr_2017,
title = {NIVR: Neuro Imaging in Virtual Reality},
author = {Tyler Ard and David M. Krum and Thai Phan and Dominique Duncan and Ryan Essex and Mark Bolas and Arthur Toga},
url = {http://ieeexplore.ieee.org/abstract/document/7892381/},
doi = {10.1109/VR.2017.7892381},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {465--466},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Visualization is a critical component of neuroimaging, and how best to view data that is naturally three-dimensional is a long-standing question in neuroscience. Many approaches, programs, and techniques have been developed specifically for neuroimaging. However, exploration of 3D information through a 2D screen is inherently limited. Many neuroscientific researchers hope that with the recent commercialization and popularization of VR, it can offer the next step in data visualization and exploration. Neuro Imaging in Virtual Reality (NIVR) is a visualization suite that employs various immersive visualizations to represent neuroimaging information in VR. Some established techniques, such as raymarching volume visualization, are paired with newer techniques, such as near-field rendering, to provide a broad basis of how we can leverage VR to improve visualization and navigation of neuroimaging data. Several of the neuroscientific visualization approaches presented are, to our knowledge, the first of their kind. NIVR offers not only an exploration of neuroscientific data visualization, but also a tool to expose and educate the public regarding recent advancements in the field of neuroimaging. By providing an engaging experience to explore new techniques and discoveries in neuroimaging, we hope to spark scientific interest in a broad audience. Furthermore, neuroimaging offers deep and expansive datasets; a single scan can involve several gigabytes of information. Visualization and exploration of this type of information can be challenging, and real-time exploration of this information in VR even more so. NIVR explores pathways which make this possible, and offers preliminary stereo visualizations of these types of massive data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Chih-Fan; Bolas, Mark; Rosenberg, Evan Suma
Rapid Creation of Photorealistic Virtual Reality Content with Consumer Depth Cameras Inproceedings
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 473–474, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{chen_rapid_2017,
title = {Rapid Creation of Photorealistic Virtual Reality Content with Consumer Depth Cameras},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma Rosenberg},
url = {http://ieeexplore.ieee.org/abstract/document/7892385/},
doi = {10.1109/VR.2017.7892385},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {473--474},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Virtual objects are essential for building environments in virtual reality (VR) applications. However, creating photorealistic 3D models is not easy, and handcrafting the detailed 3D model from a real object can be time and labor intensive. An alternative way is to build a structured camera array such as a light-stage to reconstruct the model from a real object. However, these technologies are very expensive and not practical for most users. In this work, we demonstrate a complete end-to-end pipeline for the capture, processing, and rendering of view-dependent 3D models in virtual reality from a single consumer-grade RGB-D camera. The geometry model and the camera trajectories are automatically reconstructed from a RGB-D image sequence captured offline. Based on the HMD position, selected images are used for real-time model rendering. The result of this pipeline is a 3D mesh with view-dependent textures suitable for real-time rendering in virtual reality. Specular reflections and light-burst effects are especially noticeable when users view the objects from different perspectives in a head-tracked environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
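The entry above describes selecting captured images for rendering based on the HMD position. A common way to implement such view-dependent texture selection is to weight the captured camera views by how closely their viewing directions align with the current head-tracked view. The Python sketch below is a generic illustration of that idea under stated assumptions; the function name and parameters are hypothetical and are not the selection scheme used in the paper.

```python
import numpy as np

def view_blend_weights(view_dir, capture_dirs, k=3):
    """Select the k captured camera views whose directions best match the
    current HMD viewing direction and return normalized blending weights.

    view_dir     : length-3 unit vector, current HMD view direction
    capture_dirs : (N, 3) array of unit vectors, one per captured image
    """
    capture_dirs = np.asarray(capture_dirs, dtype=float)
    view_dir = np.asarray(view_dir, dtype=float)
    cos_sim = capture_dirs @ view_dir              # alignment with each capture view
    nearest = np.argsort(-cos_sim)[:k]             # k most view-aligned cameras
    weights = np.clip(cos_sim[nearest], 0.0, None)
    total = weights.sum()
    weights = weights / total if total > 0 else np.full(k, 1.0 / k)
    return nearest, weights
```

Blending only the few most view-aligned captures, rather than averaging all of them, is what preserves view-dependent effects such as the specular reflections and light bursts mentioned in the abstract.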
Azmandian, Mahdi; Grechkin, Timofey; Rosenberg, Evan Suma
An Evaluation of Strategies for Two-User Redirected Walking in Shared Physical Spaces Inproceedings
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 91–98, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{azmandian_evaluation_2017,
title = {An Evaluation of Strategies for Two-User Redirected Walking in Shared Physical Spaces},
author = {Mahdi Azmandian and Timofey Grechkin and Evan Suma Rosenberg},
url = {http://ieeexplore.ieee.org/abstract/document/7892235/},
doi = {10.1109/VR.2017.7892235},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {91--98},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {As the focus of virtual reality technology is shifting from single-person experiences to multi-user interactions, it becomes increasingly important to accommodate multiple co-located users within a shared real-world space. For locomotion and navigation, the introduction of multiple users moving both virtually and physically creates additional challenges related to potential user-on-user collisions. In this work, we focus on defining the extent of these challenges, in order to apply redirected walking to two users immersed in virtual reality experiences within a shared physical tracked space. Using a computer simulation framework, we explore the costs and benefits of splitting available physical space between users versus attempting to algorithmically prevent user-to-user collisions. We also explore fundamental components of collision prevention such as steering the users away from each other, forced stopping, and user re-orientation. Each component was analyzed for the number of potential disruptions to the flow of the virtual experience. We also develop a novel collision prevention algorithm that reduces overall interruptions by 17.6% and collision prevention events by 58.3%. Our results show that sharing space using our collision prevention method is superior to subdividing the tracked space.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; Anglin, Julia; Krum, David M.; Liew, Sook-Lei
REINVENT: A Low-Cost, Virtual Reality Brain-Computer Interface for Severe Stroke Upper Limb Motor Recovery Inproceedings
In: Proceedings of the IEEE Virtual Reality Conference, pp. 385–386, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{spicer_reinvent_2017,
title = {REINVENT: A Low-Cost, Virtual Reality Brain-Computer Interface for Severe Stroke Upper Limb Motor Recovery},
author = {Ryan Spicer and Julia Anglin and David M. Krum and Sook-Lei Liew},
url = {http://ieeexplore.ieee.org/abstract/document/7892338/},
doi = {10.1109/VR.2017.7892338},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of the IEEE Virtual Reality Conference},
pages = {385--386},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {There are few effective treatments for rehabilitation of severe motor impairment after stroke. We developed a novel closed-loop neurofeedback system called REINVENT to promote motor recovery in this population. REINVENT (Rehabilitation Environment using the Integration of Neuromuscular-based Virtual Enhancements for Neural Training) harnesses recent advances in neuroscience, wearable sensors, and virtual technology and integrates low-cost electroencephalography (EEG) and electromyography (EMG) sensors with feedback in a head-mounted virtual reality display (VR) to provide neurofeedback when an individual's neuromuscular signals indicate movement attempt, even in the absence of actual movement. Here we describe the REINVENT prototype and provide evidence of the feasibility and safety of using REINVENT with older adults.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Phan, Thai; Kang, Sin-Hwa
Motor Adaptation in Response to Scaling and Diminished Feedback in Virtual Reality Inproceedings
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 233–234, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{krum_motor_2017,
title = {Motor Adaptation in Response to Scaling and Diminished Feedback in Virtual Reality},
author = {David M. Krum and Thai Phan and Sin-Hwa Kang},
url = {http://ieeexplore.ieee.org/document/7892262/#full-text-section},
doi = {10.1109/VR.2017.7892262},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {233--234},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {As interaction techniques involving scaling of motor space in virtual reality are becoming more prevalent, it is important to understand how individuals adapt to such scalings and how they re-adapt back to non-scaled norms. This preliminary work examines how individuals, performing a targeted ball throwing task, adapted to addition and removal of a translational scaling of the ball’s forward flight. This was examined under various conditions: flight of the ball shown with no delay, hidden flight of the ball with no delay, and hidden flight with a 2 second delay. Hiding the ball’s flight, as well as the delay, created disruptions in the ability of the participants to perform the task and adapt to new scaling conditions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Inproceedings
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {Ryan McAlinden and Sin-Hwa Kang and Benjamin Nye and Artemisa Phillips and Julia Campbell and Stephan L. Goldberg},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts on making content more interesting (i.e. the return on investment, or ROI). The paper presents findings from a 3-year-long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)): the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions: engagement, learning, and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others cannot be underestimated, and that the familiar phrase 'you get what you pay for' is as true with online learning as it is with other areas of content design and software development.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Inproceedings
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from UAS-Collected Imagery},
author = {Ryan Spicer and Ryan McAlinden and Damon Conover},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created by the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, structures, roads and other surface features. Attributes such as slope and edge detection and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serve as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning: collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Krum, David M.; Bolas, Mark T.
Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments Journal Article
In: ACM Transactions on Applied Perception, vol. 14, no. 2, pp. 1–17, 2016, ISSN: 1544-3558.
@article{jones_vertical_2016,
title = {Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments},
author = {J. Adam Jones and David M. Krum and Mark T. Bolas},
url = {http://dl.acm.org/citation.cfm?id=2983631},
doi = {10.1145/2983631},
issn = {1544-3558},
year = {2016},
date = {2016-10-01},
journal = {ACM Transactions on Applied Perception},
volume = {14},
number = {2},
pages = {1--17},
abstract = {In this article, we detail a series of experiments that examines the effect of vertical field-of-view extension and the addition of non-specific peripheral visual stimulation on gait characteristics and distance judgments in a head-worn virtual environment. Specifically, we examined four field-of-view configurations: a common 60° diagonal field of view (48° × 40°), a 60° diagonal field of view with the addition of a luminous white frame in the far periphery, a field of view with an extended upper edge, and a field of view with an extended lower edge. We found that extension of the field of view, either with spatially congruent or spatially non-informative visuals, resulted in improved distance judgments and changes in observed posture. However, these effects were not equal across all field-of-view configurations, suggesting that some configurations may be more appropriate than others when balancing performance, cost, and ergonomics.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Chih-Fan; Bolas, Mark; Suma, Evan
Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping Inproceedings
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, pp. 1–2, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{chen_real-time_2016,
title = {Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2945162},
doi = {10.1145/2945078.2945162},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
pages = {1--2},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {With the recent proliferation of high-fidelity head-mounted displays (HMDs), there is increasing demand for realistic 3D content that can be integrated into virtual reality environments. However, creating photorealistic models is not only difficult but also time consuming. A simpler alternative involves scanning objects in the real world and rendering their digitized counterpart in the virtual world. Capturing objects can be achieved by performing a 3D scan using widely available consumer-grade RGB-D cameras. This process involves reconstructing the geometric model from depth images generated using a structured light or time-of-flight sensor. The colormap is determined by fusing data from multiple color images captured during the scan. Existing methods compute the color of each vertex by averaging the colors from all these images. Blending colors in this manner creates low-fidelity models that appear blurry (Figure 1, right). Furthermore, this approach also yields textures with fixed lighting that is baked on the model. This limitation becomes more apparent when viewed in head-tracked virtual reality, as the illumination (e.g. specular reflections) does not change appropriately based on the user's viewpoint.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Grechkin, Timofey; Thomas, Jerald; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Revisiting detection thresholds for redirected walking: combining translation and curvature gains Inproceedings
In: Proceedings of the ACM Symposium on Applied Perception, pp. 113–120, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4383-1.
@inproceedings{grechkin_revisiting_2016,
title = {Revisiting detection thresholds for redirected walking: combining translation and curvature gains},
author = {Timofey Grechkin and Jerald Thomas and Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2931018},
doi = {10.1145/2931002.2931018},
isbn = {978-1-4503-4383-1},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
pages = {113--120},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {Redirected walking enables the exploration of large virtual environments while requiring only a finite amount of physical space. Unfortunately, in living-room-sized tracked areas the effectiveness of common redirection algorithms such as Steer-to-Center is very limited. A potential solution is to increase redirection effectiveness by applying two types of perceptual manipulations (curvature and translation gains) simultaneously. This paper investigates how such a combination may affect detection thresholds for curvature gain. To this end we analyze the estimation methodology and discuss the selection process for a suitable estimation method. We then compare curvature detection thresholds obtained under different levels of translation gain using two different estimation methods: method of constant stimuli and Green’s maximum likelihood procedure. The data from both experiments shows no evidence that curvature gain detection thresholds were affected by the presence of translation gain (with test levels spanning previously estimated interval of undetectable translation gain levels). This suggests that in practice currently used levels of translation and curvature gains can be safely applied simultaneously. Furthermore, we present some evidence that curvature detection thresholds may be lower than previously reported. Our estimates indicate that users can be redirected on a circular arc with a radius of either 11.6 m or 6.4 m depending on the estimation method vs. the previously reported value of 22 m. These results highlight that the detection threshold estimates vary significantly with the estimation method and suggest the need for further studies to define efficient and reliable estimation methodology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
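For context on the radius figures quoted above: in the redirected walking literature, curvature gain is conventionally expressed through the radius r of the circular arc the user physically walks while perceiving a straight virtual path, so a detection threshold on the gain translates directly into a minimum usable arc radius. The standard relation (from the general literature, not taken from this paper) is:

```latex
g_C = \frac{1}{r}, \qquad r_{\min} = \frac{1}{g_{C,\max}}
```

Under this convention, the reported estimates of r = 11.6 m and r = 6.4 m correspond to curvature gains of roughly 0.086 and 0.156 rad/m, versus about 0.045 rad/m for the previously reported 22 m radius.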
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Inproceedings
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18--26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Phan, Thai; Bolas, Mark; Krum, David M.
User Perceptions of a Virtual Human Over Mobile Video Chat Interactions Incollection
In: Human-Computer Interaction. Novel User Experiences, vol. 9733, pp. 107–118, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39512-8 978-3-319-39513-5.
@incollection{kang_user_2016,
title = {User Perceptions of a Virtual Human Over Mobile Video Chat Interactions},
author = {Sin-Hwa Kang and Thai Phan and Mark Bolas and David M. Krum},
url = {https://link.springer.com/chapter/10.1007/978-3-319-39513-5_10},
isbn = {978-3-319-39512-8 978-3-319-39513-5},
year = {2016},
date = {2016-06-01},
booktitle = {Human-Computer Interaction. Novel User Experiences},
volume = {9733},
pages = {107--118},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {We believe that virtual humans, presented over video chat services, such as Skype, and delivered using smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. To explore this subject, we have built a hardware and software apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we conducted two experiments to investigate the applications and characteristics of virtual humans that interact over mobile video. In Experiment 1, we investigated user reactions to the physical realism of the background scene in which a virtual human was displayed. In Experiment 2, we examined how virtual characters can establish and maintain longer term relationships with users, using ideas from Social Exchange Theory to strengthen bonds between interactants. Experiment 2 involved repeated interactions with a virtual human over a period of time. Both studies used counseling-style interactions with users. The results demonstrated that males were more attracted socially to a virtual human that was presented over a realistic background than a featureless background while females were more socially attracted to a virtual human with a less realistic featureless background. The results further revealed that users felt the virtual human was a compassionate partner when they interacted with the virtual human over multiple calls, rather than just a single call.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Inproceedings
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121--129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments Inproceedings
In: 2nd Workshop on Everyday Virtual Reality, IEEE, Greenville, SC, 2016.
@inproceedings{azmandian_redirected_2016,
title = {The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://www.adalsimeone.me/papers/WEVR2016/WEVR2016_Azmandian.pdf},
year = {2016},
date = {2016-03-01},
booktitle = {2nd Workshop on Everyday Virtual Reality},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
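The toolkit entry above packages the standard redirection gains (translation, rotation, and curvature) that appear throughout the redirected walking papers in this list. As a rough illustration of how such gains act on per-frame head-tracking data, here is a minimal Python sketch; the function name, signature, and numbers are illustrative assumptions and are not part of the Redirected Walking Toolkit's actual API.

```python
import numpy as np

def apply_redirection(delta_pos, delta_yaw, g_t=1.0, g_r=1.0, g_c=0.0):
    """Map one frame of physical head motion into virtual motion.

    delta_pos : (x, z) physical translation this frame, in meters
    delta_yaw : physical head rotation this frame, in radians
    g_t       : translation gain (virtual distance = g_t * physical distance)
    g_r       : rotation gain (virtual rotation = g_r * physical rotation)
    g_c       : curvature gain in radians per meter walked, i.e. the
                reciprocal of the radius of the induced physical arc
    """
    delta_pos = np.asarray(delta_pos, dtype=float)
    distance = float(np.linalg.norm(delta_pos))
    virtual_delta_pos = g_t * delta_pos
    # Curvature redirection injects extra rotation proportional to distance walked.
    virtual_delta_yaw = g_r * delta_yaw + g_c * distance
    return virtual_delta_pos, virtual_delta_yaw

# Example: a 2 cm step with a 7.5 m curvature radius injects about
# 0.0027 rad (~0.15 degrees) of unnoticed rotation in a single frame.
v_pos, v_yaw = apply_redirection((0.0, 0.02), 0.0, g_t=1.1, g_c=1.0 / 7.5)
```

Accumulating these small per-frame adjustments is what steers the user's physical path away from tracked-space boundaries while the virtual path appears straight.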
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Automated Path Prediction for Redirected Walking Using Navigation Meshes Inproceedings
In: 2016 IEEE Symposium on 3D User Interfaces (3DUI), pp. 63–66, IEEE, Greenville, SC, 2016.
@inproceedings{azmandian_automated_2016,
title = {Automated Path Prediction for Redirected Walking Using Navigation Meshes},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7460032},
doi = {10.1109/3DUI.2016.7460032},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Symposium on 3D User Interfaces (3DUI)},
pages = {63--66},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. Effectiveness of redirection algorithms can significantly improve when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Grechkin, Timofey; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Towards Context-Sensitive Reorientation for Real Walking in Virtual Reality Inproceedings
In: 2015 IEEE Virtual Reality (VR), pp. 185–186, IEEE, Arles, France, 2015, ISBN: 978-1-4799-1727-3.
@inproceedings{grechkin_towards_2015,
title = {Towards Context-Sensitive Reorientation for Real Walking in Virtual Reality},
author = {Timofey Grechkin and Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7223357},
doi = {10.1109/VR.2015.7223357},
isbn = {978-1-4799-1727-3},
year = {2015},
date = {2015-03-01},
booktitle = {2015 IEEE Virtual Reality (VR)},
pages = {185--186},
publisher = {IEEE},
address = {Arles, France},
abstract = {Redirected walking techniques have been introduced to overcome physical limitations for natural locomotion in virtual reality. Although subtle perceptual manipulations are helpful to keep users within relatively small tracked spaces, it is inevitable that users will approach critical boundary limits. Current solutions to this problem involve breaks in presence by introducing distractors, or freezing the virtual world relative to the user’s perspective. We propose an approach that integrates into the virtual world narrative to draw users’ attention and to cause them to temporarily alter their course to avoid going off bounds. This method ties together unnoticeable translation, rotation, and curvature gains, efficiently reorienting the user while maintaining the user’s sense of immersion. We also discuss how this new method can be effectively used in conjunction with other reorientation techniques.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan A.; Krum, David M.; Richmond, Todd; Bolas, Mark
The MxR Lab at the USC Institute for Creative Technologies Book
Arles, France, 2015.
@book{suma_mxr_2015,
title = {The MxR Lab at the USC Institute for Creative Technologies},
author = {Evan A. Suma and David M. Krum and Todd Richmond and Mark Bolas},
url = {http://ict.usc.edu/pubs/The%20MxR%20Lab%20at%20the%20USC%20Institute%20for%20Creative%20Technologies.pdf},
year = {2015},
date = {2015-03-01},
address = {Arles, France},
abstract = {The MxR Lab at the University of Southern California explores techniques and technologies to improve the fluency of human-computer interactions and create engaging and effective synthetic experiences. With a research facility at the Institute for Creative Technologies as well as the satellite MxR Studio at the School of Cinematic Arts, this unique environment facilitates cross-disciplinary teams from computer science, engineering, communications, and cinema. The MxR Lab philosophy begins with rapid prototyping and playful exploration that progressively evolves to more refined development pipelines, formal research studies, and eventual dissemination through academic papers and open-source initiatives. We also sometimes engage in large-scale Nerf battles.},
keywords = {MxR},
pubstate = {published},
tppubtype = {book}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Inproceedings
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134--134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2014
Feng, Andrew; Lucas, Gale; Marsella, Stacy; Suma, Evan; Chiu, Chung-Cheng; Casas, Dan; Shapiro, Ari
Acting the Part: The Role of Gesture on Avatar Identity Inproceedings
In: Proceedings of the Seventh International Conference on Motion in Games (MIG 2014), pp. 49–54, ACM Press, Playa Vista, CA, 2014, ISBN: 978-1-4503-2623-0.
@inproceedings{feng_acting_2014,
title = {Acting the Part: The Role of Gesture on Avatar Identity},
author = {Andrew Feng and Gale Lucas and Stacy Marsella and Evan Suma and Chung-Cheng Chiu and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2668064.2668102},
doi = {10.1145/2668064.2668102},
isbn = {978-1-4503-2623-0},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the Seventh International Conference on Motion in Games (MIG 2014)},
pages = {49--54},
publisher = {ACM Press},
address = {Playa Vista, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. However, in order to generate a recognizable 3D avatar, the movement and behavior of the human subject should be captured and replicated as well. We present a method of generating a 3D model from a scan, as well as a method to incorporate a subject's style of gesturing into a 3D character. We present a study which shows that 3D characters that used the same gestural style as their original human subjects were more recognizable as the original subject than those that did not.},
keywords = {MxR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Chih-Fan; Spicer, Ryan; Yahata, Rhys; Bolas, Mark; Suma, Evan
Real-time and Robust Grasping Detection Inproceedings
In: Proceedings of the 2nd ACM symposium on Spatial user interaction, pp. 159–159, ACM, Honolulu, HI, 2014.
@inproceedings{chen_real-time_2014,
title = {Real-time and Robust Grasping Detection},
author = {Chih-Fan Chen and Ryan Spicer and Rhys Yahata and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Real-Time%20and%20Robust%20Grasping%20Detection.pdf},
year = {2014},
date = {2014-10-01},
booktitle = {Proceedings of the 2nd ACM symposium on Spatial user interaction},
pages = {159--159},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {Depth-based gesture cameras provide a promising and novel way to interface with computers. Nevertheless, this type of interaction remains challenging due to the complexity of finger interactions, particularly under large viewpoint variations. Existing middleware such as Intel Perceptual Computing SDK (PCSDK) or SoftKinetic IISU can provide abundant hand tracking and gesture information. However, the data is too noisy (Fig. 1, left) for consistent and reliable use in our application. In this work, we present a filtering approach that combines several features from PCSDK to achieve more stable hand openness and supports grasping interactions in virtual environments. A support vector machine (SVM), a machine learning method, is used to achieve better accuracy in a single frame, and a Markov Random Field (MRF), a probabilistic model, is used to stabilize and smooth the sequential output. Our experimental results verify the effectiveness and the robustness of our method.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Countering User Deviation During Redirected Walking Inproceedings
In: Proceedings of the ACM Symposium on Applied Perception, Vancouver, British Columbia, Canada, 2014.
@inproceedings{azmandian_countering_2014,
title = {Countering User Deviation During Redirected Walking},
author = {Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Countering%20User%20Deviation%20During%20Redirected%20Walking.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
address = {Vancouver, British Columbia, Canada},
abstract = {Redirected walking is a technique that leverages human perception characteristics to allow locomotion in virtual environments larger than the tracking area. Among the many redirection techniques, some strictly depend on the user’s current position and orientation, while more recent algorithms also depend on the user’s predicted behavior. This prediction serves as an input to a computationally expensive search to determine an optimal path. The search output is formulated as a series of gains to be applied at different stages along the path. For example, if a user is walking down a corridor, a natural prediction would be that the user will walk along a straight line down the corridor and will choose one of the possible directions with equal probability. In practice, deviations from the expected virtual path are inevitable, and as a result, the real world path traversed will differ from the original prediction. These deviations can not only force the search to select a less optimal path in the next iteration, but also in some cases cause the user to go off bounds, requiring resets and causing a jarring experience for the user. We propose a method to account for these deviations by modifying the redirection gains per update frame, aiming to keep the user on the intended predicted physical path.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Shapiro, Ari; Feng, Andrew; Wang, Ruizhe; Li, Hao; Bolas, Mark; Medioni, Gerard; Suma, Evan
Rapid avatar capture and simulation using commodity depth sensors Journal Article
In: Computer Animation and Virtual Worlds, vol. 25, no. 3-4, pp. 201–211, 2014, ISSN: 1546-4261.
@article{shapiro_rapid_2014,
title = {Rapid avatar capture and simulation using commodity depth sensors},
author = {Ari Shapiro and Andrew Feng and Ruizhe Wang and Hao Li and Mark Bolas and Gerard Medioni and Evan Suma},
url = {http://ict.usc.edu/pubs/Rapid%20Avatar%20Capture%20and%20Simulation%20Using%20Commodity%20Depth%20Sensors.pdf},
doi = {10.1002/cav.1579},
issn = {1546-4261},
year = {2014},
date = {2014-05-01},
journal = {Computer Animation and Virtual Worlds},
volume = {25},
number = {3-4},
pages = {201--211},
abstract = {We demonstrate a method of acquiring a 3D model of a human using commodity scanning hardware and then controlling that 3D figure in a simulated environment in only a few minutes. The model acquisition requires four static poses taken at 90 degree angles relative to each other. The 3D model is then given a skeleton and smooth binding information necessary for control and simulation. The 3D models that are captured are suitable for use in applications where recognition and distinction among characters by shape, form, or clothing is important, such as small group or crowd simulations or other socially oriented applications. Because of the speed at which a human figure can be captured and the low hardware requirements, this method can be used to capture, track, and model human figures as their appearances change over time.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Krum, David M.; Kang, Sin-Hwa; Bolas, Mark
Virtual Coaches over Mobile Video Inproceedings
In: Proceedings of the International Conference on Computer Animation and Social Agents (CASA), 2014.
@inproceedings{krum_virtual_2014,
title = {Virtual Coaches over Mobile Video},
author = {David M. Krum and Sin-Hwa Kang and Mark Bolas},
url = {http://ict.usc.edu/pubs/Virtual%20Coaches%20over%20Mobile%20Video.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the International Conference on Computer Animation and Social Agents (CASA)},
abstract = {We hypothesize that the context of a smartphone, how a virtual human is presented within a smartphone app, and indeed, the nature of that app, can profoundly affect how the virtual human is perceived by a real human. We believe that virtual humans, presented over video chat services (such as Skype) and delivered using mobile phones, can be an effective way to deliver coaching applications. We propose to build a prototype system that allows virtual humans to initiate and receive Skype calls. This hardware will enable broadcast of the audio and video imagery of a character. Using this platform and a virtual human, we will conduct two user studies. The first study will examine factors involved in making a mobile video based character seem engaging and “real”. This study will examine how character appearance and the artifacts of the communication channel, such as video and audio quality, can affect rapport with a virtual human. The second study will examine ways to maintain a long-term relationship with a character, leveraging the character’s ability to call and interact with a real human over a longer period of time. These studies will help develop design guidelines for presenting virtual humans over mobile video.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Yahata, Rhys; Bolas, Mark; Suma, Evan
An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments Inproceedings
In: IEEE Virtual Reality 2014, pp. 65–66, 2014.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{azmandian_enhanced_2014,
title = {An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments},
author = {Mahdi Azmandian and Rhys Yahata and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/An%20Enhanced%20Steering%20Algorithm%20for%20Redirected%20Walking%20in%20Virtual%20Environments.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {IEEE Virtual Reality 2014},
pages = {65--66},
abstract = {Redirected walking techniques enable natural locomotion through immersive virtual environments that are considerably larger than the available real world walking space. However, the most effective strategy for steering the user remains an open question, as most previously presented algorithms simply redirect toward the center of the physical space. In this work, we present a theoretical framework that plans a walking path through a virtual environment and calculates the parameters for combining translation, rotation, and curvature gains such that the user can traverse a series of defined waypoints efficiently based on a utility function. This function minimizes the number of overt reorientations to avoid introducing potential breaks in presence. A notable advantage of this approach is that it leverages knowledge of the layout of both the physical and virtual environments to enhance the steering strategy.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
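As a rough illustration of the planning idea in the abstract above, the sketch below walks a list of virtual segment lengths under a constant curvature gain, counts how many overt reorientations would be needed to keep the path inside a hypothetical 10 m x 10 m tracked space, and keeps the gain that minimizes that count. The room size, step length, and gain range are assumptions for the example, not values or code from the paper.

# Hypothetical illustration of gain-based steering for redirected walking.
# Virtual waypoints are walked in a straight line; a constant curvature gain
# bends the corresponding physical path into an arc.  We count how often the
# user would leave the tracked space (forcing an overt reorientation) and keep
# the gain that minimizes that count.
import math

ROOM = 5.0          # half-width of a 10 m x 10 m tracked space (meters)
STEP = 0.1          # simulation step length (meters)

def reorientations(waypoints, curvature):
    """Simulate the physical path for one curvature gain (radians per meter)."""
    x, y, heading = 0.0, 0.0, 0.0
    count = 0
    for dist in waypoints:                      # virtual segment lengths
        for _ in range(int(dist / STEP)):
            heading += curvature * STEP         # redirection bends the path
            x += STEP * math.cos(heading)
            y += STEP * math.sin(heading)
            if abs(x) > ROOM or abs(y) > ROOM:  # would leave the room:
                count += 1                      # overt reorientation needed
                heading += math.pi              # turn the user around
                x = max(min(x, ROOM), -ROOM)
                y = max(min(y, ROOM), -ROOM)
    return count

def plan(waypoints, max_gain=0.15, samples=31):
    """Pick the curvature gain (within a nominal detection threshold)
    that minimizes overt reorientations."""
    candidates = [i * max_gain / (samples - 1) for i in range(samples)]
    return min(candidates, key=lambda g: reorientations(waypoints, g))

if __name__ == "__main__":
    route = [8.0, 12.0, 6.0, 10.0]              # virtual segment lengths (m)
    best = plan(route)
    print("best curvature gain:", round(best, 3),
          "reorientations:", reorientations(route, best))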
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
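The per-viewer vertical perspective described above can be illustrated with a small interpolation sketch: tracked viewers are reduced to (horizontal angle, eye height) pairs, and each rendered direction across the display receives a smoothly blended height. The angles and heights below are made-up example values, not data or code from the system.

# Hypothetical sketch of per-vertex viewer-height interpolation for a
# projector-array display: viewer head positions (horizontal angle, eye
# height) are tracked, and each rendered vertex direction gets a smoothly
# interpolated height so imagery transitions seamlessly between viewers.

def interpolate_height(angle, viewers, default_height=1.6):
    """viewers: list of (angle_deg, eye_height_m); returns a blended height."""
    if not viewers:
        return default_height
    viewers = sorted(viewers)
    if angle <= viewers[0][0]:
        return viewers[0][1]
    if angle >= viewers[-1][0]:
        return viewers[-1][1]
    for (a0, h0), (a1, h1) in zip(viewers, viewers[1:]):
        if a0 <= angle <= a1:
            t = (angle - a0) / (a1 - a0)        # linear blend between viewers
            return (1 - t) * h0 + t * h1
    return default_height

if __name__ == "__main__":
    tracked = [(-30.0, 1.45), (10.0, 1.75)]     # two tracked viewers
    for a in (-40, -30, -10, 10, 40):           # sample vertex directions (deg)
        print(a, "deg ->", round(interpolate_height(a, tracked), 2), "m")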
Thomas, Jerald; Bashyal, Raghav; Goldstein, Samantha; Suma, Evan
MuVR: A Multi-user Virtual Reality Platform Inproceedings
In: IEEE Virtual Reality 2014, pp. 115–116, IEEE, Minneapolis, Minnesota, 2014.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{thomas_muvr_2014,
title = {MuVR: A Multi-user Virtual Reality Platform},
author = {Jerald Thomas and Raghav Bashyal and Samantha Goldstein and Evan Suma},
url = {http://ict.usc.edu/pubs/MuVR%20-%20A%20Multi-user%20Virtual%20Reality%20Platform.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {IEEE Virtual Reality 2014},
pages = {115--116},
publisher = {IEEE},
address = {Minneapolis, Minnesota},
abstract = {Consumer adoption of virtual reality technology has historically been held back by poor accessibility, the lack of intuitive multi-user capabilities, dependence on external infrastructure for rendering and tracking, and the amount of time and effort required to enter virtual reality systems. This poster presents the current status of our work creating MuVR, a Multi-User Virtual Reality platform that seeks to overcome these hindrances. The MuVR project comprises four main goals: scalable and easy to use multi-user capabilities, portable and self-contained hardware, a rapidly deployable system, and ready accessibility to others. We provide a description of the platform we developed to address these goals and discuss potential directions for future work.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Shapiro, Ari; Feng, Andrew; Wang, Ruizhe; Medioni, Gerard; Bolas, Mark; Suma, Evan A.
Automatic Acquisition and Animation of Virtual Avatars Inproceedings
In: Virtual Reality (VR), 2014 IEEE, pp. 185–186, IEEE, Minneapolis, Minnesota, 2014, ISBN: 978-1-4799-2871-2.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{shapiro_automatic_2014,
title = {Automatic Acquisition and Animation of Virtual Avatars},
author = {Ari Shapiro and Andrew Feng and Ruizhe Wang and Gerard Medioni and Mark Bolas and Evan A. Suma},
url = {http://ict.usc.edu/pubs/Automatic%20acquisition%20and%20animation%20of%20virtual%20avatars.pdf},
doi = {10.1109/VR.2014.6802113},
isbn = {978-1-4799-2871-2},
year = {2014},
date = {2014-03-01},
booktitle = {Virtual Reality (VR), 2014 IEEE},
pages = {185--186},
publisher = {IEEE},
address = {Minneapolis, Minnesota},
abstract = {The USC Institute for Creative Technologies will demonstrate a pipeline for automatic reconstruction and animation of lifelike 3D avatars acquired by rotating the user's body in front of a single Microsoft Kinect sensor. Based on a fusion of state-of-the-art techniques in computer vision, graphics, and animation, this approach can produce a fully rigged character model suitable for real-time virtual environments in less than four minutes.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Kang, Sin-Hwa; Wang, Ning
Using Social Agents to Explore Theories of Rapport and Emotional Resonance Incollection
In: Social Emotions in Nature and Artifact, pp. 181–195, 2014.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@incollection{gratch_using_2014,
title = {Using Social Agents to Explore Theories of Rapport and Emotional Resonance},
author = {Jonathan Gratch and Sin-Hwa Kang and Ning Wang},
url = {http://www.oxfordscholarship.com/view/10.1093/acprof:oso/9780195387643.001.0001/acprof-9780195387643-chapter-12},
year = {2014},
date = {2014-01-01},
booktitle = {Social Emotions in Nature and Artifact},
pages = {181--195},
abstract = {Emotions unfold with bewildering complexity in face-to-face social interactions. Building computer programs that can engage people in this unfolding emotional dance is a fascinating prospect with potentially profound practical and scientific consequences. Computer agents that engage people in this manner could enhance our understanding of this fundamental social process and, more practically, have dramatic implications for human-computer interaction. We discuss several technical challenges that must be overcome before realizing this vision. More importantly, success depends not on simply overcoming these challenges, but on demonstrating that such interactivity has measurable and desirable consequences for human-computer interaction. In this chapter, we describe the Rapport Agent, an interactive agent and methodological tool designed to investigate the role of nonverbal patterning in human-computer and computer-mediated interaction. We outline a series of laboratory studies and resulting findings that give insight into how nonverbal patterns of behavior can influence both subjective perceptions (such as feelings of rapport or embarrassment) and behavioral outcomes (such as speech fluency or intimate self-disclosure).},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
2013
Huang, Yu-Jen; Bolas, Mark; Suma, Evan
Fusing Depth, Color, and Skeleton Data for Enhanced Real-Time Hand Segmentation Inproceedings
In: ACM Symposium on Spatial User Interaction, 2013.
Links | BibTeX | Tags: MxR, UARC
@inproceedings{huang_fusing_2013,
title = {Fusing Depth, Color, and Skeleton Data for Enhanced Real-Time Hand Segmentation},
author = {Yu-Jen Huang and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Fusing%20Depth,%20Color,%20and%20Skeleton%20Data%20for%20Enhanced%20Real-Time%20Hand%20Segmentation.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {ACM Symposium on Spatial User Interaction},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan; Krum, David M.; Lange, Belinda; Koenig, Sebastian; Rizzo, Albert; Bolas, Mark
Adapting user interfaces for gestural interaction with the flexible action and articulated skeleton toolkit Journal Article
In: Computers & Graphics, vol. 37, no. 3, pp. 193–201, 2013.
Abstract | Links | BibTeX | Tags: MedVR, MxR
@article{suma_adapting_2013,
title = {Adapting user interfaces for gestural interaction with the flexible action and articulated skeleton toolkit},
author = {Evan Suma and David M. Krum and Belinda Lange and Sebastian Koenig and Albert Rizzo and Mark Bolas},
url = {http://ict.usc.edu/pubs/Adapting%20user%20interfaces%20for%20gestural%20interaction%20with%20the%20%EF%AC%82exible%20action%20and%20articulated%20skeleton%20toolkit.pdf},
year = {2013},
date = {2013-05-01},
journal = {Computers & Graphics},
volume = {37},
number = {3},
pages = {193--201},
abstract = {We present the Flexible Action and Articulated Skeleton Toolkit (FAAST), a middleware software framework for integrating full-body interaction with virtual environments, video games, and other user interfaces. This toolkit provides a complete end-to-end solution that includes a graphical user interface for custom gesture creation, sensor configuration, skeletal tracking, action recognition, and a variety of output mechanisms to control third party applications, allowing virtually any PC application to be repurposed for gestural control even if it does not explicitly support input from motion sensors. To facilitate intuitive and transparent gesture design, we define a syntax for representing human gestures using rule sets that correspond to the basic spatial and temporal components of an action. These individual rules form primitives that, although conceptually simple on their own, can be combined both simultaneously and in sequence to form sophisticated gestural interactions. In addition to presenting the system architecture and our approach for representing and designing gestural interactions, we also describe two case studies that evaluated the use of FAAST for controlling first-person video games and improving the accessibility of computing interfaces for individuals with motor impairments. Thus, this work represents an important step toward making gestural interaction more accessible for practitioners, researchers, and hobbyists alike.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {article}
}
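The rule-set idea in the abstract above can be sketched as follows: a spatial primitive is a predicate over skeleton joints, primitives combined simultaneously must all hold within one frame, and a sequence of such rule sets must be satisfied in order. The predicate names, joint labels, and thresholds are illustrative assumptions, not the toolkit's actual syntax.

# Hypothetical illustration of a rule-based gesture description: each rule is
# a spatial predicate over skeleton joints, rules combined simultaneously must
# all hold in a frame, and a sequence of such rule sets must be satisfied in
# order.  Illustrative only, not the toolkit's actual format.

def above(joint_a, joint_b, margin):
    """Spatial primitive: joint_a is at least `margin` meters above joint_b."""
    return lambda frame: frame[joint_a][1] - frame[joint_b][1] >= margin

def matches(rule_set, frame):
    return all(rule(frame) for rule in rule_set)

def recognize(sequence, frames):
    """True if the rule sets in `sequence` are satisfied in order."""
    stage = 0
    for frame in frames:
        if stage < len(sequence) and matches(sequence[stage], frame):
            stage += 1
    return stage == len(sequence)

if __name__ == "__main__":
    # "Raise the left hand above the head, then lower it below the shoulder."
    gesture = [
        [above("hand_left", "head", 0.10)],
        [above("shoulder_left", "hand_left", 0.05)],
    ]
    stream = [
        {"hand_left": (0, 1.2, 0), "head": (0, 1.6, 0), "shoulder_left": (0, 1.4, 0)},
        {"hand_left": (0, 1.8, 0), "head": (0, 1.6, 0), "shoulder_left": (0, 1.4, 0)},
        {"hand_left": (0, 1.0, 0), "head": (0, 1.6, 0), "shoulder_left": (0, 1.4, 0)},
    ]
    print("gesture recognized:", recognize(gesture, stream))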
Suma, Evan; Krum, David M.; Bolas, Mark
Redirected Walking in Mixed Reality Training Applications Incollection
In: Human Walking in Virtual Environments: Perception, Technology, and Applications, Springer, 2013, ISBN: 1-4419-8431-3.
Abstract | Links | BibTeX | Tags: MxR, UARC
@incollection{suma_redirected_2013,
title = {Redirected Walking in Mixed Reality Training Applications},
author = {Evan Suma and David M. Krum and Mark Bolas},
url = {http://www.amazon.com/Human-Walking-Virtual-Environments-Applications/dp/1441984313/ref=sr_1_1},
isbn = {1-4419-8431-3},
year = {2013},
date = {2013-05-01},
booktitle = {Human Walking in Virtual Environments: Perception, Technology, and Applications},
publisher = {Springer},
edition = {2013},
abstract = {To create effective immersive training experiences, it is important to provide intuitive interfaces that allow users to move around and interact with virtual content in a manner that replicates real world experiences. However, natural locomotion remains an implementation challenge because the dimensions of the physical tracking space restrict the size of the virtual environment that users can walk through. To relax these limitations, redirected walking techniques may be employed to enable walking through immersive virtual environments that are substantially larger than the physical tracking area. In this chapter, we present practical design considerations for employing redirected walking in immersive training applications and recent research evaluating the impact on spatial orientation. Additionally, we also describe an alternative implementation of redirection that is more appropriate for mixed reality environments. Finally, we discuss challenges and future directions for research in redirected walking with the goal of transitioning these techniques into practical training simulators.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Jones, J. Adam; Swan II, J. Edward; Bolas, Mark
Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 19, no. 4, pp. 701–710, 2013.
Abstract | Links | BibTeX | Tags: MxR, UARC
@article{jones_peripheral_2013,
title = {Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments},
author = {J. Adam Jones and J. Edward Swan II and Mark Bolas},
url = {http://ict.usc.edu/pubs/Peripheral%20Stimulation%20and%20its%20Effect%20on%20Perceived%20Spatial%20Scale%20in%20Virtual%20Environments.pdf},
doi = {10.1109/TVCG.2013.37},
year = {2013},
date = {2013-04-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {19},
number = {4},
pages = {701--710},
abstract = {The following series of experiments explore the effect of static peripheral stimulation on the perception of distance and spatial scale in a typical head-mounted virtual environment. It was found that applying constant white light in an observer’s far periphery enabled the observer to more accurately judge distances using blind walking. An effect of similar magnitude was also found when observers estimated the size of a virtual space using a visual scale task. The presence of the effect across multiple psychophysical tasks provided confidence that a perceptual change was, in fact, being invoked by the addition of the peripheral stimulation. These results were also compared to observer performance in a very large field of view virtual environment and in the real world. The subsequent findings raise the possibility that distance judgments in virtual environments might be considerably more similar to those in the real world than previous work has suggested.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
Finklestein, Samantha; Barnes, Tiffany; Wartell, Zachary; Suma, Evan
Evaluation of the Exertion and Motivation Factors of a Virtual Reality Exercise Game for Children with Autism Inproceedings
In: Workshop on Virtual and Augmented Assistive Technology, Orlando, FL, 2013.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{finklestein_evaluation_2013,
title = {Evaluation of the Exertion and Motivation Factors of a Virtual Reality Exercise Game for Children with Autism},
author = {Samantha Finklestein and Tiffany Barnes and Zachary Wartell and Evan Suma},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20the%20Exertion%20and%20Motivation%20Factors%20of%20a%20Virtual%20Reality%20Exercise%20Game%20for%20Children%20with%20Autism.pdf},
year = {2013},
date = {2013-03-01},
booktitle = {Workshop on Virtual and Augmented Assistive Technology},
address = {Orlando, FL},
abstract = {Children with autism experience significant positive behavioral and health benefits from exercise, though many of these children tend to lead sedentary lifestyles. Video games that incorporate physical activity, known as exergames, may help to motivate such children to engage in vigorous exercise, thus leading to more healthy lifestyles and reducing the likelihood of obesity. In this paper, we present a study of physical activity and motivation level for ten children with autism as they played an immersive virtual reality exergame that involved fast-paced full-body movement. Our results showed that most children, including non-verbal participants, were able to achieve vigorous activity levels, with several of them maintaining very high levels of exertion. Furthermore, the children reported high levels of enjoyment and indicated they would exercise more often if such games were routinely available. These encouraging findings suggest that exergames are a promising way to empower the families of children with autism with tools to help improve their child’s health and quality of life.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Vasylevska, Khrystyna; Kaufmann, Hannes; Bolas, Mark; Suma, Evan
Flexible Spaces: Dynamic Layout Generation for Infinite Walking in Virtual Environments Inproceedings
In: IEEE Symposium on 3D User Interfaces, Orlando, FL, 2013.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{vasylevska_flexible_2013,
title = {Flexible Spaces: Dynamic Layout Generation for Infinite Walking in Virtual Environments},
author = {Khrystyna Vasylevska and Hannes Kaufmann and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Flexible%20Spaces-%20Dynamic%20Layout%20Generation%20for%20Infinite%20Walking%20in%20Virtual%20Environments.pdf},
year = {2013},
date = {2013-03-01},
booktitle = {IEEE Symposium on 3D User Interfaces},
address = {Orlando, FL},
abstract = {Redirected walking techniques enable natural locomotion through immersive virtual environments (VEs) that are larger than the real world workspace. Most existing techniques rely upon manipulating the mapping between physical and virtual motions while the layout of the environment remains constant. However, if the primary focus of the experience is on the virtual world’s content, rather than on its spatial layout, then the goal of redirected walking can be achieved through an entirely different strategy. In this paper, we introduce flexible spaces – a novel redirection technique that enables infinite real walking in virtual environments that do not require replication of real world layouts. Flexible spaces overcome the limitations and generalize the use of overlapping (impossible) spaces and change blindness by employing procedural layout generation. Our approach allows VE designers to focus on the content of the virtual world independent of the implementation details imposed by real walking, thereby making spatial manipulation techniques more practical for use in a variety of application domains.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Vasylevska, Khrystyna; Kaufmann, Hannes; Bolas, Mark; Suma, Evan
Flexible Spaces: A Virtual Step Outside of Reality Inproceedings
In: IEEE Virtual Reality, Orlando, FL, 2013.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{vasylevska_flexible_2013-1,
title = {Flexible Spaces: A Virtual Step Outside of Reality},
author = {Khrystyna Vasylevska and Hannes Kaufmann and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Flexible%20Spaces-%20A%20Virtual%20Step%20Outside%20of%20Reality.pdf},
year = {2013},
date = {2013-03-01},
booktitle = {IEEE Virtual Reality},
address = {Orlando, FL},
abstract = {In this paper we introduce the concept of flexible spaces – a novel redirection technique that generalizes the use of overlapping (impossible) spaces and change blindness in an algorithm for dynamic layout generation. Flexible spaces is an impossible environment that violates the real world constancy in favor of providing the experience of seamless, unrestricted natural walking over a large-scale virtual environment (VE).},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2012
Yeh, Shih-Ching; Tsai, Chia-Fen; Yao-Chung, Fan; Pin-Chun, Liu; Rizzo, Albert
An Innovative ADHD Assessment System Using Virtual Reality Conference
IEEE, Langkawi, Malaysia, 2012.
Abstract | Links | BibTeX | Tags: MedVR, MxR, The Narrative Group, Virtual Humans
@conference{nokey,
title = {An Innovative ADHD Assessment System Using Virtual Reality},
author = {Shih-Ching Yeh and Chia-Fen Tsai and Fan Yao-Chung and Liu Pin-Chun and Albert Rizzo},
url = {http://ict.usc.edu/pubs/An%20innovative%20ADHD%20assessment%20system%20using%20virtual%20reality.pdf},
year = {2012},
date = {2012-12-04},
publisher = {IEEE},
address = {Langkawi, Malaysia},
abstract = {Attention Deficit Hyperactivity Disorder (ADHD) has a prevalence of about 5% and may cause inferiority complex, personality disorders, interpersonal impediment, and even anti-social behaviors in affected children if not treated early. In the past, the diagnosis of ADHD patients mainly depended on paper tests or behavior scales. However, such tests are usually time-consuming and their application suffers from constraints of external conditions in terms of test content and test type. Through the application of VR technology including head mounted display (HMD), game technology and sensors, this study develops and constructs an interactive panoramic virtual classroom scenario in which a blackboard embedded with listening test, CPT test, executive test, and visual memory test specially designed for attention and executive functions is incorporated; moreover, this study also develops a new assessment & diagnosis system based on children's performance, behavior & reaction in the above-mentioned four tests through an enormous and systematic design of a battery of visual & auditory distractions of different intensity levels, durations, and sequence. The system developed in this study is used to carry out a pilot trial on healthy volunteers and its functionalities are confirmed by the test results.},
keywords = {MedVR, MxR, The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {conference}
}
Krum, David M.; Suma, Evan; Bolas, Mark
Spatial Misregistration of Virtual Human Audio: Implications of the Precedence Effect Inproceedings
In: The 12th International Conference on Intelligent Virtual Agents (IVA), Santa Cruz, CA, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{krum_spatial_2012-1,
title = {Spatial Misregistration of Virtual Human Audio: Implications of the Precedence Effect},
author = {David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Spatial%20Misregistration%20of%20Virtual%20Human%20Audio-%20Implications%20of%20the%20Precedence%20Effect.pdf},
year = {2012},
date = {2012-09-01},
booktitle = {The 12th International Conference on Intelligent Virtual Agents (IVA)},
address = {Santa Cruz, CA},
abstract = {Virtual humans are often presented as mixed reality characters projected onto screens that are blended into a physical setting. Stereo loudspeakers to the left and right of the screen are typically used for virtual human audio. Unfortunately, stereo pairs can produce an effect known as precedence, which causes users standing close to a particular loudspeaker to perceive a collapse of the stereo sound to that singular loudspeaker. We studied if this effect might degrade the presentation of a virtual character, or if this would be prevented by the ventriloquism effect. Our results demonstrate that from viewing distances common to virtual human scenarios, a movement equivalent to a single stride can induce a stereo collapse, creating conflicting perceived locations of the virtual human’s voice. Users also expressed a preference for a sound source collocated with the virtual human’s mouth rather than a stereo pair. These results provide several design implications for virtual human display systems.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Graham, Paul; Busch, Jay; Bolas, Mark
A Cell Phone Based Platform for Facial Performance Capture Inproceedings
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{debevec_cell_2012,
title = {A Cell Phone Based Platform for Facial Performance Capture},
author = {Paul Debevec and Paul Graham and Jay Busch and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Cell%20Phone%20Based%20Platform%20for%20Facial%20Performance%20Capture.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
abstract = {We demonstrate a novel light probe which can estimate the full dynamic range of a scene with multiple bright light sources. It places diffuse strips between mirrored spherical quadrants, effectively co-locating diffuse and mirrored probes to record the full dynamic range of illumination in a single exposure. From this image, we estimate the intensity of multiple saturated light sources by solving a linear system.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Swan II, J. Edward; Singh, Gurjot; Reddy, Sujan; Moser, Kenneth; Hua, Chunya; Ellis, Stephen R.
Improvements in Visually Directed Walking in Virtual Environments Cannot be Explained by Changes in Gait Alone Inproceedings
In: ACM Symposium in Applied Perception (SAP), pp. 11–16, Los Angeles, CA, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{jones_improvements_2012,
title = {Improvements in Visually Directed Walking in Virtual Environments Cannot be Explained by Changes in Gait Alone},
author = {J. Adam Jones and J. Edward Swan II and Gurjot Singh and Sujan Reddy and Kenneth Moser and Chunya Hua and Stephen R. Ellis},
url = {http://ict.usc.edu/pubs/Improvements%20in%20Visually%20Directed%20Walking%20in%20Virtual%20Environments%20Cannot%20be%20Explained%20by%20Changes%20in%20Gait%20Alone.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM Symposium in Applied Perception (SAP)},
pages = {11--16},
address = {Los Angeles, CA},
abstract = {A previous study indicated that peripheral visual information strongly affects the judgment of egocentric distances for users of immersive virtual environments. The experiment described in this document aimed to investigate if these effects could be explained in terms of changes in gait caused by visual information in the extreme periphery. Three conditions with varying degrees of peripheral occlusion were tested and participants’ walking characteristics measured. The results indicate that the improvements in distance judgments, as peripheral information increases, can only partially be explained in terms of gait modification, but likely involve both changes in the characteristics of gait and other spatial or movement parameters.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Busch, Jay; Bolas, Mark; Debevec, Paul
A Single-Shot Light Probe Inproceedings
In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Los Angeles, CA, 2012.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{graham_single-shot_2012,
title = {A Single-Shot Light Probe},
author = {Paul Graham and Jay Busch and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20Single-Shot%20Light%20Probe.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH)},
address = {Los Angeles, CA},
abstract = {We demonstrate a novel light probe which can estimate the full dynamic range of a scene with multiple bright light sources. It places diffuse strips between mirrored spherical quadrants, effectively co-locating diffuse and mirrored probes to record the full dynamic range of illumination in a single exposure. From this image, we estimate the intensity of multiple saturated light sources by solving a linear system.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Suma, Evan; Krum, David M.; Bolas, Mark
Comparability of Narrow and Wide Field-Of-View Head-Mounted Displays for Medium-Field Distance Judgments Inproceedings
In: ACM Symposium in Applied Perception (SAP), 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{jones_comparability_2012,
title = {Comparability of Narrow and Wide Field-Of-View Head-Mounted Displays for Medium-Field Distance Judgments},
author = {J. Adam Jones and Evan Suma and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Comparability%20of%20Narrow%20and%20Wide%20Field-Of-View%20Head-Mounted%20Displays%20for%20Medium-Field%20Distance%20Judgments.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM Symposium in Applied Perception (SAP)},
abstract = {As wider field-of-view displays become more common, the question arises as to whether or not data collected on these displays are comparable to those collected with smaller field-of-view displays. This document describes a pilot study that aimed to address these concerns by comparing medium-field distance judgments in a 60° FOV display, a 150° FOV display, and a simulated 60° FOV within the 150° FOV display. The results indicate that participants performed similarly in both the actual and simulated 60° FOV displays. On average, participants in the 150° FOV display improved distance judgments by 13% over the 60° FOV displays.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan; Lange, Belinda; Rizzo, Albert; Krum, David M.; Bolas, Mark
FAAST-R: Defining a Core Mechanic for Designing Gestural Interfaces Inproceedings
In: The Dimensions of CHI: Touching and Designing 3D User Interfaces (3DCHI), Austin, TX, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{suma_faast-r_2012,
title = {FAAST-R: Defining a Core Mechanic for Designing Gestural Interfaces},
author = {Evan Suma and Belinda Lange and Albert Rizzo and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/FAAST-R-%20Defining%20a%20Core%20Mechanic%20for%20Designing%20Gestural%20Interfaces.pdf},
year = {2012},
date = {2012-05-01},
booktitle = {The Dimensions of CHI: Touching and Designing 3D User Interfaces (3DCHI)},
address = {Austin, TX},
abstract = {We present a syntax for representing human gestures using rule sets that correspond to the basic spatial and temporal components of an action. These individual rules form primitives that, although conceptually simple on their own, can be combined both simultaneously and in sequence to form sophisticated gestural interactions. Along with a graphical user interface for custom gesture creation, this approach was incorporated into the Flexible Action and Articulated Skeleton Toolkit as a recognition module (FAAST-R). This toolkit can either be used to facilitate the development of motion-based user interfaces or to repurpose existing closed-source applications and games by mapping body motions to keyboard and mouse events. Thus, this work represents an important step towards making gestural interaction more accessible for practitioners, researchers, and hobbyists alike.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Lange, Belinda; Koenig, Sebastian; Chang, Chien-Yen; McConnel, Eric; Suma, Evan; Bolas, Mark; Rizzo, Albert
Designing informed game-based rehabilitation tasks leveraging advances in virtual reality Journal Article
In: Disability and Rehabilitation, 2012.
Abstract | Links | BibTeX | Tags: MedVR, MxR
@article{lange_designing_2012,
title = {Designing informed game-based rehabilitation tasks leveraging advances in virtual reality},
author = {Belinda Lange and Sebastian Koenig and Chien-Yen Chang and Eric McConnel and Evan Suma and Mark Bolas and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Designing%20informed%20game-bases%20rehabilitation%20tasks%20leveraging%20advances%20in%20virtual%20reality.pdf},
year = {2012},
date = {2012-04-01},
journal = {Disability and Rehabilitation},
abstract = {This paper details a brief history and rationale for the use of virtual reality (VR) technology for clinical research and intervention, and then focuses on game-based VR applications in the area of rehabilitation. An analysis of the match between rehabilitation task requirements and the assets available with VR technology is presented. Key messages and implications: Low-cost camera-based systems capable of tracking user behavior at sufficient levels for game-based virtual rehabilitation activities are currently available for in-home use. Authoring software is now being developed that aims to provide clinicians with a usable toolkit for leveraging this technology. This will facilitate informed professional input on software design, development and application to ensure safe and effective use in the rehabilitation context. Conclusion: The field of rehabilitation generally stands to benefit from the continual advances in VR technology, concomitant system cost reductions and an expanding clinical research literature and knowledge base. Home-based activity within VR systems that are low-cost, easy to deploy and maintain, and meet the requirements for “good” interactive rehabilitation tasks could radically improve users’ access to care, adherence to prescribed training and subsequently enhance functional activity in everyday life in clinical populations.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {article}
}
Suma, Evan; Lipps, Zachary; Finklestein, Samantha; Krum, David M.; Bolas, Mark
Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 18, no. 4, pp. 555–564, 2012.
Abstract | Links | BibTeX | Tags: MxR
@article{suma_impossible_2012,
title = {Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture},
author = {Evan Suma and Zachary Lipps and Samantha Finklestein and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Impossible%20Spaces-%20Maximizing%20Natural%20Walking%20in%20Virtual%20Environments%20with%20Self-Overlapping%20Architecture.pdf},
year = {2012},
date = {2012-04-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {18},
number = {4},
pages = {555--564},
abstract = {Walking is only possible within immersive virtual environments that fit inside the boundaries of the user’s physical workspace. To reduce the severity of the restrictions imposed by limited physical area, we introduce “impossible spaces,” a new design mechanic for virtual environments that wish to maximize the size of the virtual environment that can be explored with natural locomotion. Such environments make use of self-overlapping architectural layouts, effectively compressing comparatively large interior environments into smaller physical areas. We conducted two formal user studies to explore the perception and experience of impossible spaces. In the first experiment, we showed that reasonably small virtual rooms may overlap by as much as 56% before users begin to detect that they are in an impossible space, and that the larger virtual rooms that expanded to maximally fill our available 9.14m x 9.14m workspace may overlap by up to 31%. Our results also demonstrate that users perceive distances to objects in adjacent overlapping rooms as if the overall space was uncompressed, even at overlap levels that were overtly noticeable. In our second experiment, we combined several well-known redirection techniques to string together a chain of impossible spaces in an expansive outdoor scene. We then conducted an exploratory analysis of users’ verbal feedback during exploration, which indicated that impossible spaces provide an even more powerful illusion when users are naive to the manipulation.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
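The overlap percentages reported above can be made concrete with a small footprint calculation: given two axis-aligned room rectangles compressed into the same physical workspace, compute the shared area as a fraction of one room's area. The room dimensions in the example are arbitrary assumptions, not the layouts used in the study.

# Hypothetical arithmetic sketch for the self-overlapping layouts described
# above: how much of one virtual room's footprint overlaps an adjacent room
# when both are compressed into the same physical workspace.

def overlap_fraction(room_a, room_b):
    """Rooms are axis-aligned rectangles (xmin, ymin, xmax, ymax) in meters.
    Returns the overlapping area as a fraction of room_a's area."""
    ax0, ay0, ax1, ay1 = room_a
    bx0, by0, bx1, by1 = room_b
    w = max(0.0, min(ax1, bx1) - max(ax0, bx0))
    h = max(0.0, min(ay1, by1) - max(ay0, by0))
    area_a = (ax1 - ax0) * (ay1 - ay0)
    return (w * h) / area_a

if __name__ == "__main__":
    # Two 6 m x 6 m rooms whose footprints are shifted 4 m apart.
    room_a = (0.0, 0.0, 6.0, 6.0)
    room_b = (4.0, 0.0, 10.0, 6.0)
    print(f"overlap: {overlap_fraction(room_a, room_b):.0%}")   # prints 33%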
Hoberman, Perry; Krum, David M.; Suma, Evan; Bolas, Mark
Immersive Training Games for Smartphone-Based Head Mounted Displays Journal Article
In: IEEE Virtual Reality, pp. 151–152, 2012.
Abstract | Links | BibTeX | Tags: MxR
@article{hoberman_immersive_2012,
title = {Immersive Training Games for Smartphone-Based Head Mounted Displays},
author = {Perry Hoberman and David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Immersive%20Training%20Games%20for%20Smartphone-Based%20Head%20Mounted%20Displays.pdf},
year = {2012},
date = {2012-03-01},
journal = {IEEE Virtual Reality},
pages = {151--152},
abstract = {Thin computing clients, such as smartphones and tablets, have exhibited recent growth in display resolutions, processing power, and graphical rendering speeds. In this poster, we show how we leveraged these trends to create virtual reality (VR) training games which run entirely on a commodity mobile computing platform. This platform consists of a commercial off-the-shelf game engine, commodity smartphones, and mass produced optics. The games utilize the strengths of this platform to provide immersive features like 360 degree photo panoramas and interactive 3D virtual scenes. By sharing information about building such applications, we hope to enable others to develop new types of mobile VR applications. In particular, we feel this system is ideally suited for casual “pick up and use” VR applications for collaborative classroom learning, design reviews, and other multi-user immersive experiences.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
Singh, Gurjot; Swan II, J. Edward; Jones, J. Adam; Ellis, Stephen R.
Depth Judgments by Reaching and Matching in Near-Field Augmented Reality Inproceedings
In: IEEE Virtual Reality, pp. 165–166, Orange County, CA, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{singh_depth_2012,
title = {Depth Judgments by Reaching and Matching in Near-Field Augmented Reality},
author = {Gurjot Singh and J. Edward Swan II and J. Adam Jones and Stephen R. Ellis},
url = {http://ict.usc.edu/pubs/Depth%20Judgments%20by%20Reaching%20and%20Matching%20in%20Near-Field%20Augmented%20Reality.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE Virtual Reality},
pages = {165--166},
address = {Orange County, CA},
abstract = {In this abstract we describe an experiment that measured depth judgments in optical see-through augmented reality (AR) at near-field reaching distances of 24 to 56 cm. The 2 × 2 experiment crossed two depth judgment tasks, perceptual matching and blind reaching, with two different environments, a real-world environment and an augmented reality environment. We designed a task that used a direct reaching gesture at constant percentages of each participant's maximum reach; our task was inspired by previous work by Tresilian and Mon-Williams [6] that found very accurate blind reaching results in a real-world environment.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan; Bruder, Gerd; Steinicke, Frank; Krum, David M.; Bolas, Mark
A Taxonomy for Deploying Redirection Techniques in Immersive Virtual Environments Inproceedings
In: IEEE Virtual Reality, Orange County, CA, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{suma_taxonomy_2012,
title = {A Taxonomy for Deploying Redirection Techniques in Immersive Virtual Environments},
author = {Evan Suma and Gerd Bruder and Frank Steinicke and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Taxonomy%20for%20Deploying%20Redirection%20Techniques%20in%20Immersive%20Virtual%20Environments.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE Virtual Reality},
address = {Orange County, CA},
abstract = {Natural walking can provide a compelling experience in immersive virtual environments, but it remains an implementation challenge due to the physical space constraints imposed on the size of the virtual world. The use of redirection techniques is a promising approach that relaxes the space requirements of natural walking by manipulating the user’s route in the virtual environment, causing the real world path to remain within the boundaries of the physical workspace. In this paper, we present and apply a novel taxonomy that separates redirection techniques according to their geometric flexibility versus the likelihood that they will be noticed by users. Additionally, we conducted a user study of three reorientation techniques, which confirmed that participants were less likely to experience a break in presence when reoriented using the techniques classified as subtle in our taxonomy. Our results also suggest that reorientation with change blindness illusions may give the impression of exploring a more expansive environment than continuous rotation techniques, but at the cost of negatively impacting spatial knowledge acquisition.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Burba, Nathan; Bolas, Mark; Krum, David M.; Suma, Evan
Unobtrusive Measurement of Subtle Nonverbal Behaviors with the Microsoft Kinect Inproceedings
In: IEEE VR Workshop on Ambient Information Technologies, pp. 10–13, Orange County, CA, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{burba_unobtrusive_2012,
title = {Unobtrusive Measurement of Subtle Nonverbal Behaviors with the Microsoft Kinect},
author = {Nathan Burba and Mark Bolas and David M. Krum and Evan Suma},
url = {http://ict.usc.edu/pubs/Unobtrusive%20Measurement%20of%20Subtle%20Nonverbal%20Behaviors%20with%20the%20Microsoft%20Kinect.pdf},
year = {2012},
date = {2012-03-01},
booktitle = {IEEE VR Workshop on Ambient Information Technologies},
pages = {10--13},
address = {Orange County, CA},
abstract = {We describe two approaches for unobtrusively sensing subtle nonverbal behaviors using a consumer-level depth sensing camera. The first signal, respiratory rate, is estimated by measuring the visual expansion and contraction of the user's chest cavity during inhalation and exhalation. Additionally, we detect a specific type of fidgeting behavior, known as "leg jiggling," by measuring high-frequency vertical oscillations of the user's knees. Both of these techniques rely on the combination of skeletal tracking information with raw depth readings from the sensor to identify the cyclical patterns in jittery, low-resolution data. Such subtle nonverbal signals may be useful for informing models of users' psychological states during communication with virtual human agents, thereby improving interactions that address important societal challenges in domains including education, training, and medicine.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
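A minimal sketch of the respiratory-rate idea described above: average the depth over a chest region each frame, then find the dominant oscillation in that one-dimensional signal. The frame rate, breathing band, and synthetic test signal below are assumptions for illustration, not the paper's processing pipeline.

# Hypothetical sketch: estimate breaths per minute from a 1-D time series of
# mean chest-region depth by locating the dominant frequency in a plausible
# breathing band.  Region selection and band limits are assumptions.
import numpy as np

def breathing_rate(chest_depth, fps):
    """chest_depth: 1-D array of mean chest-region depth per frame (meters)."""
    signal = chest_depth - np.mean(chest_depth)        # remove static offset
    spectrum = np.abs(np.fft.rfft(signal))
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / fps)  # Hz
    band = (freqs > 0.1) & (freqs < 0.7)               # ~6-42 breaths/min
    peak = freqs[band][np.argmax(spectrum[band])]
    return peak * 60.0

if __name__ == "__main__":
    fps, seconds, rate_hz = 30, 30, 0.25               # 15 breaths per minute
    t = np.arange(fps * seconds) / fps
    depth = 2.0 + 0.005 * np.sin(2 * np.pi * rate_hz * t)   # 5 mm chest motion
    depth += np.random.normal(0, 0.001, depth.shape)        # sensor noise
    print("estimated breaths/min:", round(breathing_rate(depth, fps), 1))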
Hoberman, Perry; Sacher, Andrew; Turpin, David; Gotsis, Marientina; Bolas, Mark; Varma, Rohit
Using the Phantogram Technique for a Collaborative Stereoscopic Multitouch Tabletop Game Inproceedings
In: International Conference on Creating, Connecting and Collaborating through Computing, 2012.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{hoberman_using_2012,
title = {Using the Phantogram Technique for a Collaborative Stereoscopic Multitouch Tabletop Game},
author = {Perry Hoberman and Andrew Sacher and David Turpin and Marientina Gotsis and Mark Bolas and Rohit Varma},
url = {http://ict.usc.edu/pubs/Using%20the%20Phantogram%20Technique%20for%20a%20Collaborative%20Stereoscopic%20Multitouch%20Tabletop%20Game.pdf},
year = {2012},
date = {2012-01-01},
booktitle = {International Conference on Creating, Connecting and Collaborating through Computing},
abstract = {This paper outlines the design of a stereoscopic project utilizing the 3D phantogram technique (anamorphically distorted projection onto a horizontal surface) to implement a two-person multitouch game using the Unity 3D engine and IZ3D drivers on the Microsoft Surface tabletop display. The purpose of the project was to develop an engaging platform for the communication of basic concepts about vision and perception for a target audience of children under six and their families in a research clinic setting. Viewed from an appropriate height and position, virtual objects and characters appear to stand directly on the tabletop, facilitating a direct and intuitive mixed reality interface. The technical challenges included occlusion of stereoscopic images by users' hands, the generation of appropriate perspectives for multiple users, and the integration of a two-dimensional multitouch surface with a three-dimensional stereoscopic display.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Suma, Evan; Bolas, Mark
Augmented Reality using Personal Projection and Retroreflection Journal Article
In: Personal and Ubiquitous Computing, vol. 16, no. 1, pp. 17–26, 2012.
Abstract | Links | BibTeX | Tags: MxR
@article{krum_augmented_2012,
title = {Augmented Reality using Personal Projection and Retroreflection},
author = {David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Augmented%20Reality%20using%20Personal%20Projection%20and%20Retroflection.pdf},
year = {2012},
date = {2012-01-01},
journal = {Personal and Ubiquitous Computing},
volume = {16},
number = {1},
pages = {17--26},
abstract = {The support of realistic and flexible training simulations for military, law enforcement, emergency response, and other domains has been an important motivator for the development of augmented reality technology. An important vision for achieving this goal has been the creation of a versatile "stage" for physical, emotional, and cognitive training that combines virtual characters and environments with real world elements, such as furniture and props. This paper presents REFLCT, a mixed reality projection framework that couples a near-axis personal projector design with tracking and novel retroreflective props and surfaces. REFLCT provides multiple users with personalized, perspective correct imagery that is uniquely composited for each user directly into and onto a surrounding environment, without any optics positioned in front of the user’s eyes or face. These characteristics facilitate team training experiences which allow users to easily interact with their teammates while wearing their standard issue gear. REFLCT can present virtual humans who can make deictic gestures and establish eye contact without the geometric ambiguity of a typical projection display. It can also display perspective correct scenes that require a realistic approach for detecting and communicating potential threats between multiple users in disparate locations. In addition to training applications, this display system appears to be well-matched with other user interface and application domains, such as asymmetric collaborative workspaces and personal information guides.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
2011
Jones, Andrew; Fyffe, Graham; Yu, Xueming; Ma, Wan-Chun; Busch, Jay; Ichikari, Ryosuke; Bolas, Mark; Debevec, Paul
Head-mounted Photometric Stereo for Performance Capture Inproceedings
In: 8th European Conference on Visual Media Production (CVMP 2011), London, UK, 2011.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_head-mounted_2011,
title = {Head-mounted Photometric Stereo for Performance Capture},
author = {Andrew Jones and Graham Fyffe and Xueming Yu and Wan-Chun Ma and Jay Busch and Ryosuke Ichikari and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Head-mounted%20Photometric%20Stereo%20for%20Performance%20Capture.pdf},
year = {2011},
date = {2011-11-01},
booktitle = {8th European Conference on Visual Media Production (CVMP 2011)},
address = {London, UK},
abstract = {Head-mounted cameras are an increasingly important tool for capturing facial performances to drive virtual characters. They provide a fixed, unoccluded view of the face, useful for observing motion capture dots or as input to video analysis. However, the 2D imagery captured with these systems is typically affected by ambient light and generally fails to record subtle 3D shape changes as the face performs. We have developed a system that augments a head-mounted camera with LED-based photometric stereo. The system allows observation of the face independent of the ambient light and generates per-pixel surface normals so that the performance is recorded dynamically in 3D. The resulting data can be used for facial relighting or as better input to machine learning algorithms for driving an animated face.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Lange, Belinda; Suma, Evan; Bolas, Mark
Virtual Reality and Interactive Digital Game Technology: New Tools to Address Obesity and Diabetes Journal Article
In: Journal of Diabetes Science and Technology, vol. 5, no. 2, pp. 256–264, 2011.
Abstract | Links | BibTeX | Tags: MedVR, MxR
@article{rizzo_virtual_2011,
title = {Virtual Reality and Interactive Digital Game Technology: New Tools to Address Obesity and Diabetes},
author = {Albert Rizzo and Belinda Lange and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20and%20Interactive%20Digital%20Game%20Technology-%20New%20Tools%20to%20Address%20Obesity%20and%20Diabetes.pdf},
year = {2011},
date = {2011-03-01},
journal = {Journal of Diabetes Science and Technology},
volume = {5},
number = {2},
pages = {256--264},
abstract = {The convergence of the exponential advances in virtual reality (VR)-enabling technologies with a growing body of clinical research and experience has fueled the evolution of the discipline of clinical VR. This article begins with a brief overview of methods for producing and delivering VR environments that can be accessed by users for a range of clinical health conditions. Interactive digital games and new forms of natural movement-based interface devices are also discussed in the context of the emerging area of exergaming, along with some of the early results from studies of energy expenditure during the use of these systems. While these results suggest that playing currently available active exergames uses significantly more energy than sedentary activities and is equivalent to a brisk walk, these activities do not reach the level of intensity that would match playing the actual sport, nor do they deliver the recommended daily amount of exercise for children. However, these results provide some support for the use of digital exergames using the current state of technology as a complement to, rather than a replacement for, regular exercise. This may change in the future as new advances in novel full-body interaction systems for providing vigorous interaction with digital games are expected to drive the creation of engaging, low-cost interactive game-based applications designed to increase exercise participation in persons at risk for obesity.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {article}
}
Suma, Evan; Lange, Belinda; Rizzo, Albert; Krum, David M.; Bolas, Mark
FAAST: The Flexible Action and Articulated Skeleton Toolkit Inproceedings
In: IEEE Virtual Reality, pp. 245–246, Singapore, 2011.
Abstract | Links | BibTeX | Tags: MedVR, MxR
@inproceedings{suma_faast_2011,
title = {FAAST: The Flexible Action and Articulated Skeleton Toolkit},
author = {Evan Suma and Belinda Lange and Albert Rizzo and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/FAAST-%20The%20Flexible%20Action%20and%20Articulated%20Skeleton%20Toolkit.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Virtual Reality},
pages = {245--246},
address = {Singapore},
abstract = {The Flexible Action and Articulated Skeleton Toolkit (FAAST) is middleware to facilitate integration of full-body control with virtual reality applications and video games using OpenNI-compliant depth sensors (currently the PrimeSensor and the Microsoft Kinect). FAAST incorporates a VRPN server for streaming the user's skeleton joints over a network, which provides a convenient interface for custom virtual reality applications and games. This body pose information can be used for goals such as realistically puppeting a virtual avatar or controlling an on-screen mouse cursor. Additionally, the toolkit also provides a configurable input emulator that detects human actions and binds them to virtual mouse and keyboard commands, which are sent to the actively selected window. Thus, FAAST can enable natural interaction for existing off-the-shelf video games that were not explicitly developed to support input from motion sensors. The actions and input bindings are configurable at run-time, allowing the user to customize the controls and sensitivity to adjust for individual body types and preferences. In the future, we plan to substantially expand FAAST’s action lexicon, provide support for recording and training custom gestures, and incorporate real-time head tracking using computer vision techniques.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
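The input-emulation concept above, where detected body actions are mapped through a user-configurable binding table onto virtual key presses, can be sketched as follows. The action names, thresholds, and the print-based event sink are placeholders for illustration; they are not FAAST's actual configuration format or API.

# Hypothetical sketch of configurable input emulation: detected body actions
# are looked up in a binding table and translated into virtual key events for
# the foreground application.  Detector, names, and event sink are placeholders.

BINDINGS = {                      # action name -> emulated key (configurable)
    "lean_left": "a",
    "lean_right": "d",
    "jump": "space",
}

def detect_actions(skeleton):
    """Toy detector: derive action names from a skeleton dict (meters)."""
    actions = []
    lean = skeleton["spine"][0] - skeleton["hips"][0]
    if lean < -0.15:
        actions.append("lean_left")
    elif lean > 0.15:
        actions.append("lean_right")
    if skeleton["hips"][1] > 1.1:
        actions.append("jump")
    return actions

def emulate(skeleton, send_key=lambda key: print("key:", key)):
    for action in detect_actions(skeleton):
        if action in BINDINGS:
            send_key(BINDINGS[action])

if __name__ == "__main__":
    emulate({"spine": (0.20, 1.3, 0.0), "hips": (0.0, 1.2, 0.0)})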
Suma, Evan; Krum, David M.; Bolas, Mark
Sharing Space in Mixed and Virtual Reality Environments Using a Low-Cost Depth Sensor Inproceedings
In: IEEE International Symposium on Virtual Reality Innovations, Singapore, 2011.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{suma_sharing_2011,
title = {Sharing Space in Mixed and Virtual Reality Environments Using a Low-Cost Depth Sensor},
author = {Evan Suma and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Sharing%20Space%20in%20Mixed%20and%20Virtual%20Reality%20Environments%20Using%20a%20Low-Cost%20Depth%20Sensor.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE International Symposium on Virtual Reality Innovations},
address = {Singapore},
abstract = {We describe an approach for enabling people to share virtual space with a user that is fully immersed in a head-mounted display. By mounting a recently developed low-cost depth sensor to the user's head, depth maps can be generated in real-time based on the user's gaze direction, allowing us to create mixed reality experiences by merging real people and objects into the virtual environment. This enables verbal and nonverbal communication between users that would normally be isolated from one another. We present the implementation of the technique, then discuss the advantages and limitations of using commercially available depth sensing technology in immersive virtual reality applications.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan; Clark, Seth; Finklestein, Samantha; Wartell, Zachary; Krum, David M.; Bolas, Mark
Leveraging Change Blindness for Redirection in Virtual Environments Inproceedings
In: IEEE Virtual Reality, pp. 159–166, 2011.
@inproceedings{suma_leveraging_2011,
title = {Leveraging Change Blindness for Redirection in Virtual Environments},
author = {Evan Suma and Seth Clark and Samantha Finklestein and Zachary Wartell and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Leveraging%20Change%20Blindness%20for%20Redirection%20in%20Virtual%20Environments.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Virtual Reality},
pages = {159--166},
abstract = {We present change blindness redirection, a novel technique for allowing the user to walk through an immersive virtual environment that is considerably larger than the available physical workspace. This approach, based on a dynamic environment model, improves on previous redirection techniques, as it does not introduce any visual-vestibular conflicts from manipulating the mapping between physical and virtual motions, nor does it require breaking presence to stop and explicitly reorient the user. We conducted two user studies to evaluate the effectiveness of the change blindness illusion when exploring a virtual environment that was an order of magnitude larger than the physical walking space. Despite the dynamically changing environment, participants were able to draw coherent sketch maps of the environment structure, and pointing task results indicated that they were able to maintain their spatial orientation within the virtual world. Only one out of 77 participants across both studies definitively noticed that a scene change had occurred, suggesting that change blindness redirection provides a remarkably compelling illusion. Secondary findings revealed that a wide field-of-view increases pointing accuracy and that experienced gamers reported a greater sense of presence than those with little or no experience with 3D video games.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Suma, Evan; Krum, David M.; Finklestein, Samantha; Bolas, Mark
Effects of Redirection on Spatial Orientation in Real and Virtual Environments Inproceedings
In: IEEE Symposium on 3D User Interfaces, pp. 35–38, Singapore, 2011.
@inproceedings{suma_effects_2011,
title = {Effects of Redirection on Spatial Orientation in Real and Virtual Environments},
author = {Evan Suma and David M. Krum and Samantha Finklestein and Mark Bolas},
url = {http://ict.usc.edu/pubs/Effects%20of%20Redirection%20on%20Spatial%20Orientation%20in%20Real%20and%20Virtual%20Environments.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Symposium on 3D User Interfaces},
pages = {35--38},
address = {Singapore},
abstract = {We report a user study that investigated the effect of redirection in an immersive virtual environment on spatial orientation relative to both real world and virtual stimuli. Participants performed a series of spatial pointing tasks with real and virtual targets, during which they experienced three within-subjects conditions: rotation-based redirection, change blindness redirection, and no redirection. Our results indicate that when using the rotation technique, participants spatially updated both their virtual and real world orientations during redirection, resulting in pointing accuracy to the targets' recomputed positions that was strikingly similar to the control condition. While our data also suggest that a similar spatial updating may have occurred when using a change blindness technique, the realignment of targets appeared to be more complicated than a simple rotation, and was thus difficult to measure quantitatively.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Olson, Logan; Krum, David M.; Suma, Evan; Bolas, Mark
A Design for a Smartphone-Based Head Mounted Display Inproceedings
In: IEEE Virtual Reality, pp. 233–234, Singapore, 2011.
@inproceedings{olson_design_2011,
title = {A Design for a Smartphone-Based Head Mounted Display},
author = {Logan Olson and David M. Krum and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/A%20Design%20for%20a%20Smartphone-Based%20Head%20Mounted%20Display.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Virtual Reality},
pages = {233--234},
address = {Singapore},
abstract = {Thin computing clients, such as smartphones and tablets, have experienced recent growth in display resolutions and graphics processing power. In this poster, we show how to leverage these trends to create an experimental wide field of view, 3D stereoscopic head mounted display (HMD), based on two high resolution smartphones. This HMD prototype is unique in that the graphics system is entirely onboard, allowing it to be lightweight, wireless, and convenient to use.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Babu, Sabarish V.; Suma, Evan; Hodges, Larry F.; Barnes, Tiffany
Learning Cultural Conversation Protocols with Immersive Interactive Virtual Humans Journal Article
In: International Journal of Artificial Intelligence, vol. 10, no. 4, pp. 25–35, 2011.
@article{babul_learning_2011,
title = {Learning Cultural Conversation Protocols with Immersive Interactive Virtual Humans},
author = {Sabarish V. Babu and Evan Suma and Larry F. Hodges and Tiffany Barnes},
url = {http://ict.usc.edu/pubs/Learning%20Cultural%20Conversation%20Protocols%20with%20Immersive%20Interactive%20Virtual%20Humans.pdf},
year = {2011},
date = {2011-01-01},
journal = {International Journal of Artificial Intelligence},
volume = {10},
number = {4},
pages = {25--35},
abstract = {This paper reports on a study conducted to investigate the effects of using immersive virtual humans in natural multi-modal interaction to teach users cultural conversational verbal and non-verbal protocols in south Indian culture. The study was conducted using a between-subjects experimental design. We compared instruction and interactive feedback from immersive virtual humans against instruction based on a written study guide with illustrations of the cultural protocols. Participants were then tested on how well they learned the cultural conversational protocols by exercising the cultural conventions in front of videos of real people. Subjective evaluations of participants' performance were conducted by three south Indian reviewers who were blind to the condition to which the participants were assigned. Objective evaluations of participants' performance were conducted on the motion tracking log data recorded during the testing session. We also measured the participants' positive and negative affect before and after training in both conditions, as well as the effect of co-presence with the life-size virtual south Indians. The results of our subjective evaluation suggest that participants who trained with the virtual humans performed significantly better than the participants who studied from literature. The results also revealed that there were no significant differences in positive or negative affect between conditions. However, overall for all participants in both conditions, positive affect increased and negative affect decreased from before to after instruction.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
Lange, Belinda; Rizzo, Albert; Chang, Chien-Yen; Suma, Evan; Bolas, Mark
Markerless Full Body Tracking: Depth-Sensing Technology within Virtual Environments Inproceedings
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2011.
@inproceedings{lange_markerless_2011,
title = {Markerless Full Body Tracking: Depth-Sensing Technology within Virtual Environments},
author = {Belinda Lange and Albert Rizzo and Chien-Yen Chang and Evan Suma and Mark Bolas},
url = {http://ict.usc.edu/pubs/Markerless%20Full%20Body%20Tracking-%20Depth-Sensing%20Technology%20within%20Virtual%20Environments.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
address = {Orlando, FL},
abstract = {Over the last decade there has been growing recognition of the potential value of virtual reality and game technology for creating a new generation of tools for advancing rehabilitation, training and exercise activities. However, until recently the only way people could interact with digital games and virtual reality simulations, was by using relatively constrained gamepad, joystick and keyboard interface devices. Thus, rather than promoting physical activity, these modes of interaction encourage a more sedentary approach to playing games, typically while seated on the couch or in front of a desk. More complex and expensive motion tracking systems enable immersive interactions but are only available at restricted locations and are not readily available in the home setting. Recent advances in video game technology have fueled a proliferation of low-cost devices that can sense the user's motion. This paper will present and discuss three potential applications of the new depth-sensing camera technology from PrimeSense and Microsoft Kinect. The paper will outline the technology underlying the sensor, the development of our open source middleware allowing developers to make applications, and provide examples of applications that enhance interaction within virtual environments and game-based training/rehabilitation tools. The PrimeSense or Kinect sensors, along with open source middleware, provide markerless full-body tracking on a conventional PC using a single plug and play USB sensor. This technology provides a fully articulated skeleton that digitizes the user's body pose and directly quantizes their movements in real time without encumbering the user with tracking devices or markers. We have explored the integration of the depth sensing technology and middleware within three applications: 1) virtual environments, 2) gesture controlled PC games, 3) a game developed to target specific movements for rehabilitation. The benefits of implementing this technology in these three areas demonstrate the potential to provide needed applications for modern-day warfighters.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Jurik, Joel; Jones, Andrew; Bolas, Mark; Debevec, Paul
Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array Inproceedings
In: IEEE International Workshop on Projector–Camera Systems (PROCAMS), Colorado Springs, CO, 2011.
@inproceedings{jurik_prototyping_2011,
title = {Prototyping a Light Field Display Involving Direct Observation of a Video Projector Array},
author = {Joel Jurik and Andrew Jones and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Prototyping%20a%20Light%20Field%20Display%20Involving%20Direct%20Observation%20of%20a%20Video%20Projector%20Array.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {IEEE International Workshop on Projector–Camera Systems (PROCAMS)},
address = {Colorado Springs, CO},
abstract = {We present a concept for a full-parallax light field display achieved by having users look directly into an array of video projectors. Each projector acts as one angularly varying pixel, so the display's spatial resolution depends on the number of video projectors and the angular resolution depends on the pixel resolution of any one video projector. We prototype a horizontal-parallax-only arrangement by mechanically moving a single pico-projector to an array of positions, and use long-exposure photography to simulate video of a horizontal array. With this setup, we determine the minimal projector density required to produce a continuous image, and describe practical ways to achieve such density and to realize the resulting system. We finally show that if today's pico-projectors become sufficiently inexpensive, immersive full-parallax displays with arbitrarily high spatial and angular resolution will become possible.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Lange, Belinda; Suma, Evan; Newman, Brad; Phan, Thai; Chang, Chien-Yen; Rizzo, Albert; Bolas, Mark
Leveraging Unencumbered Full Body Control of Animated Virtual Characters for Game-Based Rehabilitation Inproceedings
In: HCI International, 2011.
@inproceedings{lange_leveraging_2011,
title = {Leveraging Unencumbered Full Body Control of Animated Virtual Characters for Game-Based Rehabilitation},
author = {Belinda Lange and Evan Suma and Brad Newman and Thai Phan and Chien-Yen Chang and Albert Rizzo and Mark Bolas},
url = {http://ict.usc.edu/pubs/Leveraging%20Unencumbered%20Full%20Body%20Control%20of%20Animated%20Virtual%20Characters%20for%20Game-Based%20Rehabilitation.pdf},
year = {2011},
date = {2011-01-01},
booktitle = {HCI International},
abstract = {The use of commercial video games as rehabilitation tools, such as the Nintendo Wii Fit, has recently gained much interest in the physical therapy arena. However, physical rehabilitation requires accurate and appropriate tracking and feedback of performance, often not provided by existing commercial console devices or games. This paper describes the development of an application that leverages recent advances in commercial video game technology to provide full-body control of animated virtual characters with low cost markerless tracking. The aim of this research is to develop and evaluate an interactive game-based rehabilitation tool for balance training of adults with neurological injury. This paper outlines the development and evaluation of a game-based rehabilitation tool using the PrimeSense depth sensing technology, designed to elicit specific therapeutic motions when controlling a virtual avatar in pursuit of in-game goals. A sample of nine adults participated in the initial user testing, providing feedback on the hardware and software prototype.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
2010
Sadek, Ramy; Krum, David M.; Bolas, Mark
Simulating Hearing Loss in Virtual Training Inproceedings
In: Audio Engineering Society, San Francisco, CA, 2010.
@inproceedings{sadek_simulating_2010,
title = {Simulating Hearing Loss in Virtual Training},
author = {Ramy Sadek and David M. Krum and Mark Bolas},
url = {http://ict.usc.edu/pubs/Simulating%20Hearing%20Loss%20in%20Virtual%20Training.pdf},
year = {2010},
date = {2010-11-01},
booktitle = {Audio Engineering Society},
address = {San Francisco, CA},
abstract = {Audio systems for virtual reality and augmented reality training environments commonly focus on high-quality audio reproduction. Yet many trainees may face real-world situations wherein hearing is compromised. In these cases, the hindrance caused by impaired or lost hearing is a significant stressor that may affect performance. Because this phenomenon is hard to simulate without actually causing hearing damage, trainees are largely unpracticed at operating with diminished hearing. To improve the match between training scenarios and real-world situations, this effort aims to add simulated hearing loss or impairment as a training variable. The goal is to affect everything users hear – including non-simulated sounds such as their own and each other's voices – without overt noticeability, risk to hearing, or requiring headphones.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Sadek, Ramy
Automatic Parallelism for Dataflow Graphs Inproceedings
In: Audio Engineering Society, San Francisco, CA, 2010.
@inproceedings{sadek_automatic_2010,
title = {Automatic Parallelism for Dataflow Graphs},
author = {Ramy Sadek},
url = {http://ict.usc.edu/pubs/Automatic%20Parallelism%20for%20Dataflow%20Graphs.pdf},
year = {2010},
date = {2010-11-01},
booktitle = {Audio Engineering Society},
address = {San Francisco, CA},
abstract = {This paper presents a novel algorithm to automate high-level parallelization from graph-based data structures representing data flow. This automatic optimization yields large performance improvements for multi-core machines running host-based applications. Results of these advances are shown through their incorporation into the audio processing engine Application Rendering Immersive Audio (ARIA) presented at AES 117. Although the ARIA system is the target framework, the contributions presented in this paper are generic and therefore applicable in a variety of software such as Pure Data and Max/MSP, game audio engines, non-linear editors and related systems. Additionally, the parallel execution paths extracted are shown to give effectively optimal cache performance, yielding significant speedup for such host-based applications.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Lane, Jo Ann; Boehm, Barry; Bolas, Mark; Madni, Azad; Turner, Richard
Critical Success Factors for Rapid, Innovative Solutions Inproceedings
In: New Modeling Concepts for Today's Software Processes, pp. 52–61, Springer-Verlag, Paderborn, Germany, 2010.
@inproceedings{lane_critical_2010,
title = {Critical Success Factors for Rapid, Innovative Solutions},
author = {Jo Ann Lane and Barry Boehm and Mark Bolas and Azad Madni and Richard Turner},
url = {http://ict.usc.edu/pubs/Critical%20Success%20Factors%20for%20Rapid%20Innovative%20Solutions.pdf},
year = {2010},
date = {2010-07-01},
booktitle = {New Modeling Concepts for Today's Software Processes},
volume = {6195},
pages = {52--61},
publisher = {Springer-Verlag},
address = {Paderborn, Germany},
abstract = {Many of today's problems are in search of new, innovative solutions. However, the development of new and innovative solutions has been elusive to many, resulting in considerable effort and dollars and no solution or a mediocre solution late to the marketplace or customer. This paper describes the results of research conducted to identify the critical success factors employed by several successful, high-performance organizations in the development of innovative systems. These critical success factors span technical, managerial, people, and cultural aspects of the innovative environment.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}