Publications
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg, Germany, 2023, ISBN: 978-1-4503-9994-4.
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1–2},
publisher = {ACM},
address = {Würzburg, Germany},
keywords = {DTIC, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt; Nye, Benjamin; Sinatra, Gale; Swartout, William; Sjöberg, Molly; Porter, Molly; Nelson, David; Kennedy, Alana; Herrick, Imogen; Weeks, Danaan DeNeve; Lindsey, Emily
Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits Journal Article
In: Palaeontologia Electronica, 2022, ISSN: 1935-3952, 1094-8074.
@article{davis_designing_2022,
title = {Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits},
author = {Matt Davis and Benjamin Nye and Gale Sinatra and William Swartout and Molly Sjöberg and Molly Porter and David Nelson and Alana Kennedy and Imogen Herrick and Danaan DeNeve Weeks and Emily Lindsey},
url = {https://palaeo-electronica.org/content/2022/3524-la-brea-tar-pits-paleoart},
doi = {10.26879/1191},
issn = {1935-3952, 1094-8074},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-15},
journal = {Palaeontologia Electronica},
keywords = {AR, MxR, VR},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin; Nelson, David; Herrick, Imogen; Sinatra, Gale; Swartout, Bill; Porter, Molly; Davis, Matt; Lindsey, Emily
SCIENCE BIG and SMALL: Visiting the Ice Age through Miniature and Life-Sized Augmented Reality Experiences Proceedings Article
In: TMS Proceedings 2021, American Psychological Association, 2021.
@inproceedings{nye_science_2021,
title = {SCIENCE BIG and SMALL: Visiting the Ice Age through Miniature and Life-Sized Augmented Reality Experiences},
author = {Benjamin Nye and David Nelson and Imogen Herrick and Gale Sinatra and Bill Swartout and Molly Porter and Matt Davis and Emily Lindsey},
url = {https://tmb.apaopen.org/pub/djue4kjf},
doi = {10.1037/tms0000106},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-21},
booktitle = {TMS Proceedings 2021},
publisher = {American Psychological Association},
keywords = {AR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3, 978-1-5106-3604-0.
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3, 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19},
pages = {59–68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora achieving r = 0.66. However, the cross-corpus evaluation demonstrated that nonverbal behavior can outperform language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/~nasihati/publications/HLTCEM_2019.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. In order to develop this system, we are creating a virtual reality simulation, in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Panlener, William; Krum, David M.; Jones, J. Adam
Effects of Horizontal Field of View Extension on Spatial Judgments in Virtual Reality Proceedings Article
In: Proceedings of the IEEE Southeast Conference 2019, pp. 7, IEEE, Huntsville, AL, 2019.
@inproceedings{panlener_effects_2019,
title = {Effects of Horizontal Field of View Extension on Spatial Judgments in Virtual Reality},
author = {William Panlener and David M. Krum and J. Adam Jones},
url = {https://www.researchgate.net/publication/332448571_Effects_of_Horizontal_Field_of_View_Extension_on_Spatial_Judgments_in_Virtual_Reality},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of the IEEE Southeast Conference 2019},
pages = {7},
publisher = {IEEE},
address = {Huntsville, AL},
abstract = {It is known that observers tend to misperceive distances during spatial judgment tasks in virtual reality. Virtual environments restrict field of view as compared to real environments. We explore whether horizontal field of view restriction affects real or perceived ocular convergence. We also explore effects that the size and symmetry of field of view may have. We find that convergence is not impacted by altering field of view, but the subjective median plane is affected. We also find that distance is better estimated in wider fields of view, and that lateral bias in estimation is correlated to the symmetry of the field.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark S.; Krum, David M.
Unifying Research to Address Motion Sickness Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 1858–1859, IEEE, Osaka, Japan, 2019, ISBN: 978-1-7281-1377-7.
@inproceedings{dennison_unifying_2019,
title = {Unifying Research to Address Motion Sickness},
author = {Mark S. Dennison and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8798297/},
doi = {10.1109/VR.2019.8798297},
isbn = {978-1-7281-1377-7},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {1858–1859},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Chanenson, Jake; Cowal, Peter; Weaver, Madeleine
Advancing Ethical Decision Making in Virtual Reality Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 2, IEEE, Osaka, Japan, 2019.
@inproceedings{kang_advancing_2019,
title = {Advancing Ethical Decision Making in Virtual Reality},
author = {Sin-Hwa Kang and Jake Chanenson and Peter Cowal and Madeleine Weaver},
url = {https://ieeexplore.ieee.org/document/8798151},
doi = {10.1109/VR.2019.8798151},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {2},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Virtual reality (VR) has been widely utilized for training and education purposes because of pedagogical, safety, and economic benefits. The investigation of moral judgment is a particularly interesting VR application, related to training. For this study, we designed a within-subject experiment manipulating the role of study participants in a Trolley Dilemma scenario: either victim or driver. We conducted a pilot study with four participants and describe preliminary results and implications in this poster.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bernardet, Ulysses; Kang, Sin-Hwa; Feng, Andrew; DiPaola, Steve; Shapiro, Ari
Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study Proceedings Article
In: 2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE), pp. 1–9, IEEE, Osaka, Japan, 2019, ISBN: 978-1-7281-3219-8.
@inproceedings{bernardet_speech_2019,
title = {Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study},
author = {Ulysses Bernardet and Sin-Hwa Kang and Andrew Feng and Steve DiPaola and Ari Shapiro},
url = {https://ieeexplore.ieee.org/document/8714737/},
doi = {10.1109/VHCIE.2019.8714737},
isbn = {978-1-7281-3219-8},
year = {2019},
date = {2019-03-01},
booktitle = {2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)},
pages = {1–9},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Human speech production requires the dynamic regulation of air through the vocal system. While virtual character systems commonly are capable of speech output, they rarely take breathing during speaking – speech breathing – into account. We believe that integrating dynamic speech breathing systems in virtual characters can significantly contribute to augmenting their realism. Here, we present a novel control architecture aimed at generating speech breathing in virtual characters. This architecture is informed by behavioral, linguistic and anatomical knowledge of human speech breathing. Based on textual input and controlled by a set of low- and high-level parameters, the system produces dynamic signals in real-time that control the virtual character’s anatomy (thorax, abdomen, head, nostrils, and mouth) and sound production (speech and breathing).},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Hopper, Jonathan E.; Bolas, Mark T.; Krum, David M.
Orientation Perception in Real and Virtual Environments Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, pp. 2050–2060, 2019, ISSN: 1077-2626, 1941-0506, 2160-9306.
@article{jones_orientation_2019,
title = {Orientation Perception in Real and Virtual Environments},
author = {J. Adam Jones and Jonathan E. Hopper and Mark T. Bolas and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8642384/},
doi = {10.1109/TVCG.2019.2898798},
issn = {1077-2626, 1941-0506, 2160-9306},
year = {2019},
date = {2019-02-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {2050–2060},
abstract = {Spatial perception in virtual environments has been a topic of intense research. Arguably, the majority of this work has focused on distance perception. However, orientation perception is also an important factor. In this paper, we systematically investigate allocentric orientation judgments in both real and virtual contexts over the course of four experiments. A pattern of sinusoidal judgment errors known to exist in 2D perspective displays is found to persist in immersive virtual environments. This pattern also manifests itself in a real world setting using two differing judgment methods. The findings suggest the presence of a radial anisotropy that persists across viewing contexts. Additionally, there is some evidence to suggest that observers have multiple strategies for processing orientations but further investigation is needed to fully describe this phenomenon. We also offer design suggestions for 3D user interfaces where users may perform orientation judgments.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
Gil, Yolanda; Hill, Mary; Horel, John; Hsu, Leslie; Kinter, Jim; Knoblock, Craig; Krum, David; Kumar, Vipin; Lermusiaux, Pierre; Liu, Yan; North, Chris; Pierce, Suzanne A.; Pankratius, Victor; Peters, Shanan; Plale, Beth; Pope, Allen; Ravela, Sai; Restrepo, Juan; Ridley, Aaron; Samet, Hanan; Shekhar, Shashi; Babaie, Hassan; Banerjee, Arindam; Borne, Kirk; Bust, Gary; Cheatham, Michelle; Ebert-Uphoff, Imme; Gomes, Carla
Intelligent systems for geosciences: an essential research agenda Journal Article
In: Communications of the ACM, vol. 62, no. 1, pp. 76–84, 2018, ISSN: 0001-0782.
@article{gil_intelligent_2018,
title = {Intelligent systems for geosciences: an essential research agenda},
author = {Yolanda Gil and Mary Hill and John Horel and Leslie Hsu and Jim Kinter and Craig Knoblock and David Krum and Vipin Kumar and Pierre Lermusiaux and Yan Liu and Chris North and Suzanne A. Pierce and Victor Pankratius and Shanan Peters and Beth Plale and Allen Pope and Sai Ravela and Juan Restrepo and Aaron Ridley and Hanan Samet and Shashi Shekhar and Hassan Babaie and Arindam Banerjee and Kirk Borne and Gary Bust and Michelle Cheatham and Imme Ebert-Uphoff and Carla Gomes},
url = {http://dl.acm.org/citation.cfm?doid=3301004.3192335},
doi = {10.1145/3192335},
issn = {0001-0782},
year = {2018},
date = {2018-12-01},
journal = {Communications of the ACM},
volume = {62},
number = {1},
pages = {76–84},
abstract = {A research agenda for intelligent systems that will result in fundamental new capabilities for understanding the Earth system.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen
Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing Proceedings Article
In: Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018, pp. 17–22, ACM Press, Beijing, China, 2018, ISBN: 978-1-4503-6376-1.
@inproceedings{kang_socio-cultural_2018,
title = {Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang},
url = {http://dl.acm.org/citation.cfm?doid=3205326.3205348},
doi = {10.1145/3205326.3205348},
isbn = {978-1-4503-6376-1},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018},
pages = {17–22},
publisher = {ACM Press},
address = {Beijing, China},
abstract = {We explored how users perceive virtual characters that performed the role of a counseling interviewer, while presenting different levels of social class, as well as single or multi-tasking behavior. To investigate this subject, we designed a 2x2 experiment (tasking type and social class of the virtual counseling interviewer). In the experiment, participants experienced the counseling interview interactions over video conferencing on a smartphone. We measured user responses to and perceptions of the virtual human interviewer. The results demonstrate that the tasking types and social class of the virtual counselor affected user responses to and perceptions of the virtual counselor. The results offer insight into the design and development of effective, realistic, and believable virtual human counselors. Furthermore, the results also address current social questions about how smartphones might mediate social interactions, including human-agent interactions.},
keywords = {ARL, DoD, MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Trout, Theron; Russell, Stephen M.; Harrison, Andre V.; Spicer, Ryan; Dennison, Mark S.; Thomas, Jerald; Rosenberg, Evan Suma
Collaborative mixed reality (MxR) and networked decision making Proceedings Article
In: Next-Generation Analyst VI, pp. 21, SPIE, Orlando, Florida, 2018, ISBN: 978-1-5106-1817-6, 978-1-5106-1818-3.
@inproceedings{trout_collaborative_2018,
title = {Collaborative mixed reality (MxR) and networked decision making},
author = {Theron Trout and Stephen M. Russell and Andre V. Harrison and Ryan Spicer and Mark S. Dennison and Jerald Thomas and Evan Suma Rosenberg},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10653/2309959/Collaborative-mixed-reality-MxR-and-networked-decision-making/10.1117/12.2309959.full},
doi = {10.1117/12.2309959},
isbn = {978-1-5106-1817-6, 978-1-5106-1818-3},
year = {2018},
date = {2018-04-01},
booktitle = {Next-Generation Analyst VI},
pages = {21},
publisher = {SPIE},
address = {Orlando, Florida},
abstract = {Collaborative decision-making remains a significant research challenge that is made even more complicated in real-time or tactical problem-contexts. Advances in technology have dramatically assisted the ability for computers and networks to improve the decision-making process (i.e., intelligence, design, and choice). In the intelligence phase of decision making, mixed reality (MxR) has shown a great deal of promise through implementations of simulation and training. However, little research has focused on an implementation of MxR to support the entire scope of the decision cycle, let alone collaboratively and in a tactical context. This paper presents a description of the design and initial implementation for the Defense Integrated Collaborative Environment (DICE), an experimental framework for supporting theoretical and empirical research on MxR for tactical decision-making support.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Kang, Sin-Hwa; Phan, Thai
Influences on the Elicitation of Interpersonal Space with Virtual Humans Proceedings Article
In: Proceedings of the 2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), IEEE, Tuebingen/Reutlingen, Germany, 2018, ISBN: 978-1-5386-3365-6.
@inproceedings{krum_influences_2018,
title = {Influences on the Elicitation of Interpersonal Space with Virtual Humans},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan},
url = {https://ieeexplore.ieee.org/document/8446235/#full-text-section},
doi = {10.1109/VR.2018.8446235},
isbn = {978-1-5386-3365-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
publisher = {IEEE},
address = {Tuebingen/Reutlingen, Germany},
abstract = {The emergence of low cost virtual and augmented reality systems has encouraged the development of immersive training applications for medical, military, and many other fields. Many of the training scenarios for these various fields may require the presentation of realistic interactions with virtual humans. It is thus vital to determine the critical factors of fidelity required in those interactions to elicit naturalistic behavior on the part of trainees. Negative training may occur if trainees are inadvertently influenced to react in ways that are unexpected and unnatural, hindering proper learning and transfer of skills and knowledge back into real world contexts. In this research, we examined whether haptic priming (presenting an illusion of virtual human touch at the beginning of the virtual experience) and different locomotion techniques (either joystick or physical walking) might affect proxemic behavior in human users. The results of our study suggest that locomotion techniques can alter proxemic behavior in significant ways. Haptic priming did not appear to impact proxemic behavior, but did increase rapport and other subjective social measures. The results suggest that designers and developers of immersive training systems should carefully consider the impact of even simple design and fidelity choices on trainee reactions in social interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Chen, Samantha; Rosenberg, Evan Suma
Redirected Walking Strategies in Irregularly Shaped and Dynamic Physical Environments Proceedings Article
In: Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE, Reutlingen, Germany, 2018.
@inproceedings{chen_redirected_2018,
title = {Redirected Walking Strategies in Irregularly Shaped and Dynamic Physical Environments},
author = {Haiwei Chen and Samantha Chen and Evan Suma Rosenberg},
url = {http://wevr.adalsimeone.me/2018/WEVR2018_Chen.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces},
publisher = {IEEE},
address = {Reutlingen, Germany},
abstract = {Redirected walking (RDW) is a Virtual Reality (VR) locomotion technique that enables the exploration of a large virtual environment (VE) within a small physical space via real walking. Thus far, the physical environment has generally been assumed to be rectangular, static, and free of obstacles. However, it is unlikely that real-world locations that may be used for VR fulfill these constraints. In addition, accounting for a dynamically changing physical environment allows RDW algorithms to accommodate gradually mapped physical environments and moving objects. In this work, we introduce novel approaches that adapt RDW algorithms to support irregularly shaped and dynamic physical environments. Our methods are divided into three categories: novel RDW Greedy Algorithms that provide a generalized approach for any VE, adapted RDW Planning Algorithms that provide an optimized solution when virtual path prediction is available, and last but not least, techniques for representing irregularly shaped and dynamic physical environments that can improve performance of RDW algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Phan, Thai; Ayanian, Nora; Honig, Wolfgang
Mixed Reality Collaboration Between Human-Agent Teams Proceedings Article
In: Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE, Reutlingen, Germany, 2018, ISBN: 978-1-5386-3365-6.
@inproceedings{phan_mixed_2018,
title = {Mixed Reality Collaboration Between Human-Agent Teams},
author = {Thai Phan and Nora Ayanian and Wolfgang Honig},
url = {https://ieeexplore.ieee.org/document/8446542/#full-text-section},
doi = {10.1109/VR.2018.8446542},
isbn = {978-1-5386-3365-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces},
publisher = {IEEE},
address = {Reutlingen, Germany},
abstract = {Collaboration between two or more geographically dispersed teams has applications in research and training. In many cases specialized devices, such as robots, may need to be combined between the collaborating groups. However, it would be expensive or even impossible to collocate them at a single physical location. We describe the design of a mixed reality test bed which allows dispersed humans and physically embodied agents to collaborate within a single virtual environment. We demonstrate our approach using Unity’s networking architecture as well as open source robot software and hardware. In our scenario, a total of 3 humans and 6 drones must move through a narrow doorway while avoiding collisions in the physical spaces as well as virtual space.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nilsson, Niels; Peck, Tabitha; Bruder, Gerd; Hodgson, Eric; Serafin, Stefania; Suma, Evan; Whitton, Mary; Steinicke, Frank
15 Years of Research on Redirected Walking in Immersive Virtual Environments Journal Article
In: IEEE Computer Graphics and Applications, 2018, ISSN: 0272-1716.
@article{nilsson_15_2018,
title = {15 Years of Research on Redirected Walking in Immersive Virtual Environments},
author = {Niels Nilsson and Tabitha Peck and Gerd Bruder and Eric Hodgson and Stefania Serafin and Evan Suma and Mary Whitton and Frank Steinicke},
url = {http://ieeexplore.ieee.org/document/8255772/},
doi = {10.1109/MCG.2018.111125628},
issn = {0272-1716},
year = {2018},
date = {2018-01-01},
journal = {IEEE Computer Graphics and Applications},
abstract = {Virtual reality users wearing head-mounted displays can experience the illusion of walking in any direction for infinite distance while, in reality, they are walking a curvilinear path in physical space. This is accomplished by introducing unnoticeable rotations to the virtual environment—a technique called redirected walking. This paper gives an overview of the research that has been performed since redirected walking was first practically demonstrated 15 years ago.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Kaimakis, Nicholas J.; Krishnamachari, Madhusudhan; Swartout, William; Campbell, Julia; Anderson, Clinton; Davis, Dan M.
MentorPal: Interactive Virtual Mentors Based on Real-Life STEM Professionals Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2017, a2z, Inc., Orlando, Florida, 2017.
@inproceedings{nye_mentorpal_2017,
title = {MentorPal: Interactive Virtual Mentors Based on Real-Life STEM Professionals},
author = {Benjamin D. Nye and Nicholas J. Kaimakis and Madhusudhan Krishnamachari and William Swartout and Julia Campbell and Clinton Anderson and Dan M. Davis},
url = {http://www.iitsecdocs.com/search},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2017},
publisher = {a2z, Inc.},
address = {Orlando, Florida},
abstract = {In an ideal world, all students could meet STEM role models as they explore different careers. However, events such as career fairs do not scale well: professionals have limited time and effective mentors are not readily available in all fields. The result is that students’ understanding is minimal about what professionals in STEM fields do every day, what education is needed, and even what STEM fields exist. Moreover, since in-person interactions rely on finding people engaged in current STEM careers, students may form career goals for stagnant fields rather than growing fields (e.g., projected workforce needs). To address this problem, we are designing a scalable tablet-based app that gives students the opportunity to converse with interactive recordings of real-life STEM professionals. These conversational virtual agents will emulate a question-and-answer session with STEM professionals who have Navy ties and who are engaging, enthusiastic, and effective mentors. These interactions will allow students to have a lifelike informational interview with a virtual agent whose responses are directly drawn from a specific real professional’s video-recorded interview. This work differs from prior research on career guides by capturing the experiences of a collection of unique mentors, which should be more authentic and engaging than a generic agent or resource which speaks only about the average experience. This paper will discuss the process of creating the first such virtual STEM mentor prototype, including the development of an extensive mentoring question bank (approximately 500 questions); key mentoring topics that intersect STEM, DoD, and civilian life; techniques for cost-effective recording of remote mentors; and the process of training and verifying a natural language dialogue model for answering and suggesting career questions. Finally, we conclude with implications, strengths, and drawbacks of virtualizing the experience of talking with specific mentors, from the perspectives of efficacy, scalability, and maintainability.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Thomas, Jerald; Azmandian, Mahdi; Grunwald, Sonia; Le, Donna; Krum, David; Kang, Sin-Hwa; Rosenberg, Evan Suma
Effects of Personalized Avatar Texture Fidelity on Identity Recognition in Virtual Reality Proceedings Article
In: Proceedings of ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments, The Eurographics Association, Adelaide, Australia, 2017, ISBN: 978-3-03868-038-3.
@inproceedings{thomas_effects_2017,
title = {Effects of Personalized Avatar Texture Fidelity on Identity Recognition in Virtual Reality},
author = {Jerald Thomas and Mahdi Azmandian and Sonia Grunwald and Donna Le and David Krum and Sin-Hwa Kang and Evan Suma Rosenberg},
url = {https://diglib.eg.org/handle/10.2312/egve20171345},
doi = {10.2312/egve.20171345},
isbn = {978-3-03868-038-3},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments},
publisher = {The Eurographics Association},
address = {Adelaide, Australia},
abstract = {Recent advances in 3D scanning, reconstruction, and animation techniques have made it possible to rapidly create photorealistic avatars based on real people. While it is now possible to create personalized avatars automatically with consumer-level technology, their visual fidelity still falls far short of 3D avatars created with professional cameras and manual artist effort. To evaluate the importance of investing resources in the creation of high-quality personalized avatars, we conducted an experiment to investigate the effects of varying their visual texture fidelity, specifically focusing on identity recognition of specific individuals. We designed two virtual reality experimental scenarios: (1) selecting a specific avatar from a virtual lineup and (2) searching for an avatar in a virtual crowd. Our results showed that visual fidelity had a significant impact on participants’ abilities to identify specific avatars from a lineup wearing a head-mounted display. We also investigated gender effects for both the participants and the confederates from which the avatars were created.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2023
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: DTIC, MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
title = {DIVIS: Digital Interactive Victim Intake Simulator},
author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
doi = {10.1145/3570945.3607328},
isbn = {978-1-4503-9994-4},
year = {2023},
date = {2023-09-01},
urldate = {2024-02-20},
booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
pages = {1–2},
publisher = {ACM},
address = {Würzburg Germany},
keywords = {DTIC, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Davis, Matt; Nye, Benjamin; Sinatra, Gale; Swartout, William; Sjӧberg, Molly; Porter, Molly; Nelson, David; Kennedy, Alana; Herrick, Imogen; Weeks, Danaan DeNeve; Lindsey, Emily
Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits Journal Article
In: Palaeontolgia Electronica, 2022, ISSN: 19353952, 10948074.
Links | BibTeX | Tags: AR, MxR, VR
@article{davis_designing_2022,
title = {Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits},
author = {Matt Davis and Benjamin Nye and Gale Sinatra and William Swartout and Molly Sjӧberg and Molly Porter and David Nelson and Alana Kennedy and Imogen Herrick and Danaan DeNeve Weeks and Emily Lindsey},
url = {https://palaeo-electronica.org/content/2022/3524-la-brea-tar-pits-paleoart},
doi = {10.26879/1191},
issn = {19353952, 10948074},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-15},
journal = {Palaeontolgia Electronica},
keywords = {AR, MxR, VR},
pubstate = {published},
tppubtype = {article}
}
2021
Nye, Benjamin; Nelson, David; Herrick, Imogen; Sinatra, Gale; Swartout, Bill; Porter, Molly; Davis, Matt; Lindsey, Emily
SCIENCE BIG and SMALL: Visiting the Ice Age through Miniature and Life-Sized Augmented Reality Experiences Proceedings Article
In: TMS Proceedings 2021, American Psychological Association, 2021.
Links | BibTeX | Tags: AR, MxR
@inproceedings{nye_science_2021,
title = {SCIENCE BIG and SMALL: Visiting the Ice Age through Miniature and Life-Sized Augmented Reality Experiences},
author = {Benjamin Nye and David Nelson and Imogen Herrick and Gale Sinatra and Bill Swartout and Molly Porter and Matt Davis and Emily Lindsey},
url = {https://tmb.apaopen.org/pub/djue4kjf},
doi = {10.1037/tms0000106},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-21},
booktitle = {TMS Proceedings 2021},
publisher = {American Psychological Association},
keywords = {AR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19},
pages = {59–68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora achieving r = 0.66. However, the cross-corpus evaluation demonstrated that nonverbal behavior can outperform language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/ nasihati/publications/HLTCEM_2019.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. In order to develop this system, we are creating a virtual reality simulation, in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Panlener, William; Krum, David M; Jones, J Adam
Effects of Horizontal Field of View Extension on Spatial Judgments in Virtual Reality Proceedings Article
In: Proceedings of the IEEE Southeast Conference 2019, pp. 7, IEEE, Huntsville, AL, 2019.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{panlener_effects_2019,
title = {Effects of Horizontal Field of View Extension on Spatial Judgments in Virtual Reality},
author = {William Panlener and David M Krum and J Adam Jones},
url = {https://www.researchgate.net/publication/332448571_Effects_of_Horizontal_Field_of_View_Extension_on_Spatial_Judgments_in_Virtual_Reality},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of the IEEE Southeast Conference 2019},
pages = {7},
publisher = {IEEE},
address = {Huntsville, AL},
abstract = {It is known that observers tend to misperceive distances during spatial judgment tasks in virtual reality. Virtual environments restrict field of view as compared to real environments. We explore whether horizontal field of view restriction affects real or perceived ocular convergence. We also explore effects that the size and symmetry of field of view may have. We find that convergence is not impacted by altering field of view, but the subjective median plane is affected. We also find that distance is better estimated in wider fields of view, and that lateral bias in estimation is correlated to the symmetry of the field.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark S.; Krum, David M.
Unifying Research to Address Motion Sickness Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 1858–1859, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72811-377-7.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC
@inproceedings{dennison_unifying_2019,
title = {Unifying Research to Address Motion Sickness},
author = {Mark S. Dennison and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8798297/},
doi = {10.1109/VR.2019.8798297},
isbn = {978-1-72811-377-7},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {1858–1859},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sinhwa; Chanenson, Jake; Cowal, Peter; Weaver, Madeleine
Advancing Ethical Decision Making in Virtual Reality Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 2, IEEE, Osaka, Japan, 2019.
Abstract | Links | BibTeX | Tags: MedVR, MxR, UARC
@inproceedings{kang_advancing_2019,
title = {Advancing Ethical Decision Making in Virtual Reality},
author = {Sinhwa Kang and Jake Chanenson and Peter Cowal and Madeleine Weaver},
url = {https://ieeexplore.ieee.org/document/8798151},
doi = {10.1109/VR.2019.8798151},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {2},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Virtual reality (VR) has been widely utilized for training and education purposes because of pedagogical, safety, and economic benefits. The investigation of moral judgment is a particularly interesting VR application, related to training. For this study, we designed a withinsubject experiment manipulating the role of study participants in a Trolley Dilemma scenario: either victim or driver. We conducted a pilot study with four participants and describe preliminary results and implications in this poster.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bernardet, Ulysses; Kanq, Sin-Hwa; Feng, Andrew; DiPaola, Steve; Shapiro, Ari
Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study Proceedings Article
In: 2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE), pp. 1–9, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72813-219-8.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{bernardet_speech_2019,
title = {Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study},
author = {Ulysses Bernardet and Sin-Hwa Kanq and Andrew Feng and Steve DiPaola and Ari Shapiro},
url = {https://ieeexplore.ieee.org/document/8714737/},
doi = {10.1109/VHCIE.2019.8714737},
isbn = {978-1-72813-219-8},
year = {2019},
date = {2019-03-01},
booktitle = {2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)},
pages = {1–9},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Human speech production requires the dynamic regulation of air through the vocal system. While virtual character systems commonly are capable of speech output, they rarely take breathing during speaking – speech breathing – into account. We believe that integrating dynamic speech breathing systems in virtual characters can significantly contribute to augmenting their realism. Here, we present a novel control architecture aimed at generating speech breathing in virtual characters. This architecture is informed by behavioral, linguistic and anatomical knowledge of human speech breathing. Based on textual input and controlled by a set of lowand high-level parameters, the system produces dynamic signals in real-time that control the virtual character’s anatomy (thorax, abdomen, head, nostrils, and mouth) and sound production (speech and breathing).},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Hopper, Jonathan E.; Bolas, Mark T.; Krum, David M.
Orientation Perception in Real and Virtual Environments Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, pp. 2050 – 2060, 2019, ISSN: 1077-2626, 1941-0506, 2160-9306.
Abstract | Links | BibTeX | Tags: MxR
@article{jones_orientation_2019,
title = {Orientation Perception in Real and Virtual Environments},
author = {J. Adam Jones and Jonathan E. Hopper and Mark T. Bolas and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8642384/},
doi = {10.1109/TVCG.2019.2898798},
issn = {1077-2626, 1941-0506, 2160-9306},
year = {2019},
date = {2019-02-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {2050 – 2060},
abstract = {Spatial perception in virtual environments has been a topic of intense research. Arguably, the majority of this work has focused on distance perception. However, orientation perception is also an important factor. In this paper, we systematically investigate allocentric orientation judgments in both real and virtual contexts over the course of four experiments. A pattern of sinusoidal judgment errors known to exist in 2D perspective displays is found to persist in immersive virtual environments. This pattern also manifests itself in a real world setting using two differing judgment methods. The findings suggest the presence of a radial anisotropy that persists across viewing contexts. Additionally, there is some evidence to suggest that observers have multiple strategies for processing orientations but further investigation is needed to fully describe this phenomenon. We also offer design suggestions for 3D user interfaces where users may perform orientation judgments.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
2018
Gil, Yolanda; Hill, Mary; Horel, John; Hsu, Leslie; Kinter, Jim; Knoblock, Craig; Krum, David; Kumar, Vipin; Lermusiaux, Pierre; Liu, Yan; North, Chris; Pierce, Suzanne A.; Pankratius, Victor; Peters, Shanan; Plale, Beth; Pope, Allen; Ravela, Sai; Restrepo, Juan; Ridley, Aaron; Samet, Hanan; Shekhar, Shashi; Babaie, Hassan; Banerjee, Arindam; Borne, Kirk; Bust, Gary; Cheatham, Michelle; Ebert-Uphoff, Imme; Gomes, Carla
Intelligent systems for geosciences: an essential research agenda Journal Article
In: Communications of the ACM, vol. 62, no. 1, pp. 76–84, 2018, ISSN: 00010782.
Abstract | Links | BibTeX | Tags: MxR
@article{gil_intelligent_2018,
title = {Intelligent systems for geosciences: an essential research agenda},
author = {Yolanda Gil and Mary Hill and John Horel and Leslie Hsu and Jim Kinter and Craig Knoblock and David Krum and Vipin Kumar and Pierre Lermusiaux and Yan Liu and Chris North and Suzanne A. Pierce and Victor Pankratius and Shanan Peters and Beth Plale and Allen Pope and Sai Ravela and Juan Restrepo and Aaron Ridley and Hanan Samet and Shashi Shekhar and Hassan Babaie and Arindam Banerjee and Kirk Borne and Gary Bust and Michelle Cheatham and Imme Ebert-Uphoff and Carla Gomes},
url = {http://dl.acm.org/citation.cfm?doid=3301004.3192335},
doi = {10.1145/3192335},
issn = {00010782},
year = {2018},
date = {2018-12-01},
journal = {Communications of the ACM},
volume = {62},
number = {1},
pages = {76–84},
abstract = {A research agenda for intelligent systems that will result in fundamental new capabilities for understanding the Earth system.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen
Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing Proceedings Article
In: Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018, pp. 17–22, ACM Press, Beijing, China, 2018, ISBN: 978-1-4503-6376-1.
@inproceedings{kang_socio-cultural_2018,
title = {Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang},
url = {http://dl.acm.org/citation.cfm?doid=3205326.3205348},
doi = {10.1145/3205326.3205348},
isbn = {978-1-4503-6376-1},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018},
pages = {17–22},
publisher = {ACM Press},
address = {Beijing, China},
abstract = {We explored how users perceive virtual characters that performed the role of a counseling interviewer, while presenting different levels of social class, as well as single or multi-tasking behavior. To investigate this subject, we designed a 2x2 experiment (tasking type and social class of the virtual counseling interviewer). In the experiment, participants experienced the counseling interview interactions over video conferencing on a smartphone. We measured user responses to and perceptions of the virtual human interviewer. The results demonstrate that the tasking types and social class of the virtual counselor affected user responses to and perceptions of the virtual counselor. The results offer insight into the design and development of effective, realistic, and believable virtual human counselors. Furthermore, the results also address current social questions about how smartphones might mediate social interactions, including human-agent interactions.},
keywords = {ARL, DoD, MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Trout, Theron; Russell, Stephen M.; Harrison, Andre V.; Spicer, Ryan; Dennison, Mark S.; Thomas, Jerald; Rosenberg, Evan Suma
Collaborative mixed reality (MxR) and networked decision making Proceedings Article
In: Next-Generation Analyst VI, pp. 21, SPIE, Orlando, Florida, 2018, ISBN: 978-1-5106-1817-6 978-1-5106-1818-3.
@inproceedings{trout_collaborative_2018,
title = {Collaborative mixed reality (MxR) and networked decision making},
author = {Theron Trout and Stephen M. Russell and Andre V. Harrison and Ryan Spicer and Mark S. Dennison and Jerald Thomas and Evan Suma Rosenberg},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10653/2309959/Collaborative-mixed-reality-MxR-and-networked-decision-making/10.1117/12.2309959.full},
doi = {10.1117/12.2309959},
isbn = {978-1-5106-1817-6 978-1-5106-1818-3},
year = {2018},
date = {2018-04-01},
booktitle = {Next-Generation Analyst VI},
pages = {21},
publisher = {SPIE},
address = {Orlando, Florida},
abstract = {Collaborative decision-making remains a significant research challenge that is made even more complicated in real-time or tactical problem-contexts. Advances in technology have dramatically assisted the ability for computers and networks to improve the decision-making process (i.e. intelligence, design, and choice). In the intelligence phase of decision making, mixed reality (MxR) has shown a great deal of promise through implementations of simulation and training. However, little research has focused on an implementation of MxR to support the entire scope of the decision cycle, let alone collaboratively and in a tactical context. This paper presents a description of the design and initial implementation for the Defense Integrated Collaborative Environment (DICE), an experimental framework for supporting theoretical and empirical research on MxR for tactical decision-making support.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M; Kang, Sin-Hwa; Phan, Thai
Influences on the Elicitation of Interpersonal Space with Virtual Humans Proceedings Article
In: Proceedings of the 2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), IEEE, Tuebingen/Reutlingen, Germany, 2018, ISBN: 978-1-5386-3365-6.
@inproceedings{krum_influences_2018,
title = {Influences on the Elicitation of Interpersonal Space with Virtual Humans},
author = {David M Krum and Sin-Hwa Kang and Thai Phan},
url = {https://ieeexplore.ieee.org/document/8446235/#full-text-section},
doi = {10.1109/VR.2018.8446235},
isbn = {978-1-5386-3365-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
publisher = {IEEE},
address = {Tuebingen/Reutlingen, Germany},
abstract = {The emergence of low cost virtual and augmented reality systems has encouraged the development of immersive training applications for medical, military, and many other fields. Many of the training scenarios for these various fields may require the presentation of realistic interactions with virtual humans. It is thus vital to determine the critical factors of fidelity required in those interactions to elicit naturalistic behavior on the part of trainees. Negative training may occur if trainees are inadvertently influenced to react in ways that are unexpected and unnatural, hindering proper learning and transfer of skills and knowledge back into real world contexts. In this research, we examined whether haptic priming (presenting an illusion of virtual human touch at the beginning of the virtual experience) and different locomotion techniques (either joystick or physical walking) might affect proxemic behavior in human users. The results of our study suggest that locomotion techniques can alter proxemic behavior in significant ways. Haptic priming did not appear to impact proxemic behavior, but did increase rapport and other subjective social measures. The results suggest that designers and developers of immersive training systems should carefully consider the impact of even simple design and fidelity choices on trainee reactions in social interactions.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Chen, Samantha; Rosenberg, Evan Suma
Redirected Walking Strategies in Irregularly Shaped and Dynamic Physical Environments Proceedings Article
In: Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE, Reutlingen, Germany, 2018.
@inproceedings{chen_redirected_2018,
title = {Redirected Walking Strategies in Irregularly Shaped and Dynamic Physical Environments},
author = {Haiwei Chen and Samantha Chen and Evan Suma Rosenberg},
url = {http://wevr.adalsimeone.me/2018/WEVR2018_Chen.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces},
publisher = {IEEE},
address = {Reutlingen, Germany},
abstract = {Redirected walking (RDW) is a Virtual Reality (VR) locomotion technique that enables the exploration of a large virtual environment (VE) within a small physical space via real walking. Thus far, the physical environment has generally been assumed to be rectangular, static, and free of obstacles. However, it is unlikely that real-world locations that may be used for VR fulfill these constraints. In addition, accounting for a dynamically changing physical environment allows RDW algorithms to accommodate gradually mapped physical environments and moving objects. In this work, we introduce novel approaches that adapt RDW algorithms to support irregularly shaped and dynamic physical environments. Our methods are divided into three categories: novel RDW Greedy Algorithms that provide a generalized approach for any VE, adapted RDW Planning Algorithms that provide an optimized solution when virtual path prediction is available, and last but not least, techniques for representing irregularly shaped and dynamic physical environments that can improve performance of RDW algorithms.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Phan, Thai; Ayanian, Nora; Honig, Wolfgang
Mixed Reality Collaboration Between Human-Agent Teams Proceedings Article
In: Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE, Reutlingen, Germany, 2018, ISBN: 978-1-5386-3365-6.
@inproceedings{phan_mixed_2018,
title = {Mixed Reality Collaboration Between Human-Agent Teams},
author = {Thai Phan and Nora Ayanian and Wolfgang Honig},
url = {https://ieeexplore.ieee.org/document/8446542/#full-text-section},
doi = {10.1109/VR.2018.8446542},
isbn = {978-1-5386-3365-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces},
publisher = {IEEE},
address = {Reutlingen, Germany},
abstract = {Collaboration between two or more geographically dispersed teams has applications in research and training. In many cases specialized devices, such as robots, may need to be combined between the collaborating groups. However, it would be expensive or even impossible to collocate them at a single physical location. We describe the design of a mixed reality test bed which allows dispersed humans and physically embodied agents to collaborate within a single virtual environment. We demonstrate our approach using Unity’s networking architecture as well as open source robot software and hardware. In our scenario, a total of 3 humans and 6 drones must move through a narrow doorway while avoiding collisions in the physical spaces as well as virtual space.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nilsson, Niels; Peck, Tabitha; Bruder, Gerd; Hodgson, Eric; Serafin, Stefania; Suma, Evan; Whitton, Mary; Steinicke, Frank
15 Years of Research on Redirected Walking in Immersive Virtual Environments Journal Article
In: IEEE Computer Graphics and Applications, 2018, ISSN: 0272-1716.
@article{nilsson_15_2018,
title = {15 Years of Research on Redirected Walking in Immersive Virtual Environments},
author = {Niels Nilsson and Tabitha Peck and Gerd Bruder and Eric Hodgson and Stefania Serafin and Evan Suma and Mary Whitton and Frank Steinicke},
url = {http://ieeexplore.ieee.org/document/8255772/},
doi = {10.1109/MCG.2018.111125628},
issn = {0272-1716},
year = {2018},
date = {2018-01-01},
journal = {IEEE Computer Graphics and Applications},
abstract = {Virtual reality users wearing head-mounted displays can experience the illusion of walking in any direction for infinite distance while, in reality, they are walking a curvilinear path in physical space. This is accomplished by introducing unnoticeable rotations to the virtual environment—a technique called redirected walking. This paper gives an overview of the research that has been performed since redirected walking was first practically demonstrated 15 years ago.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
2017
Nye, Benjamin D; Kaimakis, Nicholas J; Krishnamachari, Madhusudhan; Swartout, William; Campbell, Julia; Anderson, Clinton; Davis, Dan M
MentorPal: Interactive Virtual Mentors Based on Real-Life STEM Professionals Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2017, a2z, Inc., Orlando, Florida, 2017.
@inproceedings{nye_mentorpal_2017,
title = {MentorPal: Interactive Virtual Mentors Based on Real-Life STEM Professionals},
author = {Benjamin D Nye and Nicholas J Kaimakis and Madhusudhan Krishnamachari and William Swartout and Julia Campbell and Clinton Anderson and Dan M Davis},
url = {http://www.iitsecdocs.com/search},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2017},
publisher = {a2z, Inc.},
address = {Orlando, Florida},
abstract = {In an ideal world, all students could meet STEM role models as they explore different careers. However, events such as career fairs do not scale well: professionals have limited time and effective mentors are not readily available in all fields. As a result, students understand little about what professionals in STEM fields do every day, what education is needed, and even what STEM fields exist. Moreover, since in-person interactions rely on finding people engaged in current STEM careers, students may form career goals for stagnant fields rather than growing fields (e.g., projected workforce needs). To address this problem, we are designing a scalable tablet-based app that gives students the opportunity to converse with interactive recordings of real-life STEM professionals. These conversational virtual agents will emulate a question-and-answer session with STEM professionals who have Navy ties and who are engaging, enthusiastic, and effective mentors. These interactions will allow students to have a lifelike informational interview with a virtual agent whose responses are directly drawn from a specific real professional’s video-recorded interview. This work differs from prior research on career guides by capturing the experiences of a collection of unique mentors, which should be more authentic and engaging than a generic agent or resource which speaks only about the average experience. This paper will discuss the process of creating the first such virtual STEM mentor prototype, including the development of an extensive mentoring question bank (approximately 500 questions); key mentoring topics that intersect STEM, DoD, and civilian life; techniques for cost-effective recording of remote mentors; and the process of training and verifying a natural language dialogue model for answering and suggesting career questions. Finally, we conclude with implications, strengths, and drawbacks of virtualizing the experience of talking with specific mentors, from the perspectives of efficacy, scalability, and maintainability.},
keywords = {Learning Sciences, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Thomas, Jerald; Azmandian, Mahdi; Grunwald, Sonia; Le, Donna; Krum, David; Kang, Sin-Hwa; Rosenberg, Evan Suma
Effects of Personalized Avatar Texture Fidelity on Identity Recognition in Virtual Reality Proceedings Article
In: Proceedings of ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments, The Eurographics Association, Adelaide, Australia, 2017, ISBN: 978-3-03868-038-3.
@inproceedings{thomas_effects_2017,
title = {Effects of Personalized Avatar Texture Fidelity on Identity Recognition in Virtual Reality},
author = {Jerald Thomas and Mahdi Azmandian and Sonia Grunwald and Donna Le and David Krum and Sin-Hwa Kang and Evan Suma Rosenberg},
url = {https://diglib.eg.org/handle/10.2312/egve20171345},
doi = {10.2312/egve.20171345},
isbn = {978-3-03868-038-3},
year = {2017},
date = {2017-11-01},
booktitle = {Proceedings of ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments},
publisher = {The Eurographics Association},
address = {Adelaide, Australia},
abstract = {Recent advances in 3D scanning, reconstruction, and animation techniques have made it possible to rapidly create photorealistic avatars based on real people. While it is now possible to create personalized avatars automatically with consumer-level technology, their visual fidelity still falls far short of 3D avatars created with professional cameras and manual artist effort. To evaluate the importance of investing resources in the creation of high-quality personalized avatars, we conducted an experiment to investigate the effects of varying their visual texture fidelity, specifically focusing on identity recognition of specific individuals. We designed two virtual reality experimental scenarios: (1) selecting a specific avatar from a virtual lineup and (2) searching for an avatar in a virtual crowd. Our results showed that visual fidelity had a significant impact on participants’ abilities to identify specific avatars from a lineup wearing a head-mounted display. We also investigated gender effects for both the participants and the confederates from which the avatars were created.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Chih-Fan; Bolas, Mark; Rosenberg, Evan Suma
VIEW-DEPENDENT VIRTUAL REALITY CONTENT FROM RGB-D IMAGES Proceedings Article
In: Proceedings of ICIP 2017, IEEE, Beijing, China, 2017.
@inproceedings{chen_view-dependent_2017,
title = {VIEW-DEPENDENT VIRTUAL REALITY CONTENT FROM RGB-D IMAGES},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma Rosenberg},
url = {http://people.ict.usc.edu/~suma/papers/chen-icip2017},
year = {2017},
date = {2017-09-01},
booktitle = {Proceedings of ICIP 2017},
publisher = {IEEE},
address = {Beijing, China},
abstract = {High-fidelity virtual content is essential for the creation of compelling and effective virtual reality (VR) experiences. However, creating photorealistic content is not easy, and handcrafting detailed 3D models can be time and labor intensive. Structured camera arrays, such as light-stages, can scan and reconstruct high-fidelity virtual models, but the expense makes this technology impractical for most users. In this paper, we present a complete end-to-end pipeline for the capture, processing, and rendering of view-dependent 3D models in virtual reality from a single consumer-grade depth camera. The geometry model and the camera trajectories are automatically reconstructed and optimized from an RGB-D image sequence captured offline. Based on the head-mounted display (HMD) position, the three closest images are selected for real-time rendering and fused together to smooth the transition between viewpoints. The specular reflections and light-burst effects can also be preserved and reproduced. We confirmed that our method does not require technical background knowledge by testing our system with data captured by non-expert operators.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Rosenberg, Evan Suma; Shapiro, Ari
Just-in-time, viable, 3D avatars from scans Journal Article
In: Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents), vol. 28, no. 3-4, 2017.
@article{feng_just--time_2017,
title = {Just-in-time, viable, 3D avatars from scans},
author = {Andrew Feng and Evan Suma Rosenberg and Ari Shapiro},
url = {http://onlinelibrary.wiley.com/doi/10.1002/cav.1769/epdf},
doi = {10.1002/cav.1769},
year = {2017},
date = {2017-05-01},
journal = {Computer Animation and Virtual Worlds (Special Issue on Computer Animation and Social Agents)},
volume = {28},
number = {3-4},
abstract = {We demonstrate a system that can generate a photorealistic, interactive 3-D character from a human subject that is capable of movement, emotion, speech, and gesture in less than 20 min, through a near-automatic process, without the need for 3-D artist intervention or specialized technical knowledge. Our method uses mostly commodity or off-the-shelf hardware. We demonstrate the just-in-time use of generating such 3-D models for virtual and augmented reality, games, simulation, and communication. We anticipate that the inexpensive generation of such photorealistic models will be useful in many venues where just-in-time 3-D reconstruction of digital avatars that resemble particular human subjects is necessary.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Social Impact of Enhanced Gaze Presentation Using Head Mounted Projection Proceedings Article
In: Proceedings of the Human-Computer Interaction International Conference, Springer International Publishing, Vancouver, Canada, 2017, ISBN: 978-3-319-58696-0 978-3-319-58697-7.
@inproceedings{krum_social_2017,
title = {Social Impact of Enhanced Gaze Presentation Using Head Mounted Projection},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
url = {https://link.springer.com/chapter/10.1007/978-3-319-58697-7_5},
isbn = {978-3-319-58696-0 978-3-319-58697-7},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Human-Computer Interaction International Conference},
publisher = {Springer International Publishing},
address = {Vancouver, Canada},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals in a group of observers. This could result in perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers in the room regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. In this paper, we discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan P.; Russell, Stephen M.; Rosenberg, Evan Suma
The mixed reality of things: emerging challenges for human-information interaction Proceedings Article
In: Proceedings Volume 10207, Next-Generation Analyst V, SPIE, Anaheim, CA, 2017.
@inproceedings{spicer_mixed_2017,
title = {The mixed reality of things: emerging challenges for human-information interaction},
author = {Ryan P. Spicer and Stephen M. Russell and Evan Suma Rosenberg},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2268004},
doi = {10.1117/12.2268004},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings Volume 10207, Next-Generation Analyst V},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {Virtual and mixed reality technology has advanced tremendously over the past several years. This nascent medium has the potential to transform how people communicate over distance, train for unfamiliar tasks, operate in challenging environments, and how they visualize, interact, and make decisions based on complex data. At the same time, the marketplace has experienced a proliferation of network-connected devices and generalized sensors that are becoming increasingly accessible and ubiquitous. As the "Internet of Things" expands to encompass a predicted 50 billion connected devices by 2020, the volume and complexity of information generated in pervasive and virtualized environments will continue to grow exponentially. The convergence of these trends demands a theoretically grounded research agenda that can address emerging challenges for human-information interaction (HII). Virtual and mixed reality environments can provide controlled settings where HII phenomena can be observed and measured, new theories developed, and novel algorithms and interaction techniques evaluated. In this paper, we describe the intersection of pervasive computing with virtual and mixed reality, identify current research gaps and opportunities to advance the fundamental understanding of HII, and discuss implications for the design and development of cyber-human systems for both military and civilian use.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen; Amir, Ori; Lin, Rebecca
Social influence of humor in virtual human counselor's self-disclosure Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 15464261.
@article{kang_social_2017,
title = {Social influence of humor in virtual human counselor's self-disclosure},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang and Ori Amir and Rebecca Lin},
url = {http://doi.wiley.com/10.1002/cav.1763},
doi = {10.1002/cav.1763},
issn = {15464261},
year = {2017},
date = {2017-04-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {We explored the social influence of humor in a virtual human counselor's self-disclosure while also varying the ethnicity of the virtual counselor. In a 2 × 3 experiment (humor and ethnicity of the virtual human counselor), participants experienced counseling interview interactions via Skype on a smartphone. We measured user responses to and perceptions of the virtual human counselor. The results demonstrate that humor positively affects user responses to and perceptions of a virtual counselor. The results further suggest that matching styles of humor with a virtual counselor's ethnicity influences user responses and perceptions. The results offer insight into the effective design and development of realistic and believable virtual human counselors. Furthermore, they illuminate the potential use of humor to enhance self-disclosure in human–agent interactions.},
keywords = {ARL, DoD, MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {article}
}
Krum, David M.; Phan, Thai; Kang, Sin-Hwa
Motor Adaptation in Response to Scaling and Diminished Feedback in Virtual Reality Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 233–234, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{krum_motor_2017,
title = {Motor Adaptation in Response to Scaling and Diminished Feedback in Virtual Reality},
author = {David M. Krum and Thai Phan and Sin-Hwa Kang},
url = {http://ieeexplore.ieee.org/document/7892262/#full-text-section},
doi = {10.1109/VR.2017.7892262},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {233–234},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {As interaction techniques involving scaling of motor space in virtual reality are becoming more prevalent, it is important to understand how individuals adapt to such scalings and how they re-adapt back to non-scaled norms. This preliminary work examines how individuals, performing a targeted ball throwing task, adapted to addition and removal of a translational scaling of the ball’s forward flight. This was examined under various conditions: flight of the ball shown with no delay, hidden flight of the ball with no delay, and hidden flight with a 2 second delay. Hiding the ball’s flight, as well as the delay, created disruptions in the ability of the participants to perform the task and adapt to new scaling conditions.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; Anglin, Julia; Krum, David M.; Liew, Sook-Lei
REINVENT: A Low-Cost, Virtual Reality Brain-Computer Interface for Severe Stroke Upper Limb Motor Recovery Proceedings Article
In: Proceedings of the IEEE Virtual Reality Conference, pp. 385–386, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{spicer_reinvent_2017,
title = {REINVENT: A Low-Cost, Virtual Reality Brain-Computer Interface for Severe Stroke Upper Limb Motor Recovery},
author = {Ryan Spicer and Julia Anglin and David M. Krum and Sook-Lei Liew},
url = {http://ieeexplore.ieee.org/abstract/document/7892338/},
doi = {10.1109/VR.2017.7892338},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of the IEEE Virtual Reality Conference},
pages = {385–386},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {There are few effective treatments for rehabilitation of severe motor impairment after stroke. We developed a novel closed-loop neurofeedback system called REINVENT to promote motor recovery in this population. REINVENT (Rehabilitation Environment using the Integration of Neuromuscular-based Virtual Enhancements for Neural Training) harnesses recent advances in neuroscience, wearable sensors, and virtual technology and integrates low-cost electroencephalography (EEG) and electromyography (EMG) sensors with feedback in a head-mounted virtual reality display (VR) to provide neurofeedback when an individual's neuromuscular signals indicate movement attempt, even in the absence of actual movement. Here we describe the REINVENT prototype and provide evidence of the feasibility and safety of using REINVENT with older adults.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Choromanski, Igor; Neubauer, Catherine; Krum, David M.; Spicer, Ryan; Campbell, Julia
Mixed Reality Training for Tank Platoon Leader Communication Skills Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 333–334, IEEE, Los Angeles, CA, 2017.
@inproceedings{khooshabeh_mixed_2017,
title = {Mixed Reality Training for Tank Platoon Leader Communication Skills},
author = {Peter Khooshabeh and Igor Choromanski and Catherine Neubauer and David M. Krum and Ryan Spicer and Julia Campbell},
url = {http://ieeexplore.ieee.org/document/7892312/#full-text-section},
doi = {10.1109/VR.2017.7892312},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {333–334},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Here we describe the design and usability evaluation of a mixed reality prototype that simulates the role of a tank platoon leader: an individual who not only commands a tank, but also directs a platoon of three other tanks, each with its own tank commander. The domain of tank commander training has relied on physical simulators of the actual Abrams tank and encapsulates the whole crew. The TALK-ON system we describe here focuses on training the communication skills of the leader of a simulated tank crew. We report results from a usability evaluation and discuss how they will inform our future work on collective tank training.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Ard, Tyler; Krum, David M.; Phan, Thai; Duncan, Dominique; Essex, Ryan; Bolas, Mark; Toga, Arthur
NIVR: Neuro Imaging in Virtual Reality Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 465–466, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{ard_nivr_2017,
title = {NIVR: Neuro Imaging in Virtual Reality},
author = {Tyler Ard and David M. Krum and Thai Phan and Dominique Duncan and Ryan Essex and Mark Bolas and Arthur Toga},
url = {http://ieeexplore.ieee.org/abstract/document/7892381/},
doi = {10.1109/VR.2017.7892381},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {465–466},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Visualization is a critical component of neuroimaging, and how best to view data that is naturally three-dimensional is a long-standing question in neuroscience. Many approaches, programs, and techniques have been developed specifically for neuroimaging. However, exploration of 3D information through a 2D screen is inherently limited. Many neuroscientific researchers hope that with the recent commercialization and popularization of VR, it can offer the next step in data visualization and exploration. Neuro Imaging in Virtual Reality (NIVR) is a visualization suite that employs various immersive visualizations to represent neuroimaging information in VR. Some established techniques, such as raymarching volume visualization, are paired with newer techniques, such as near-field rendering, to provide a broad basis of how we can leverage VR to improve visualization and navigation of neuroimaging data. Several of the neuroscientific visualization approaches presented are, to our knowledge, the first of their kind. NIVR offers not only an exploration of neuroscientific data visualization, but also a tool to expose and educate the public regarding recent advancements in the field of neuroimaging. By providing an engaging experience to explore new techniques and discoveries in neuroimaging, we hope to spark scientific interest in a broad audience. Furthermore, neuroimaging offers deep and expansive datasets; a single scan can involve several gigabytes of information. Visualization and exploration of this type of information can be challenging, and real-time exploration of this information in VR even more so. NIVR explores pathways which make this possible, and offers preliminary stereo visualizations of these types of massive data.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Rosenberg, Evan Suma
An Evaluation of Strategies for Two-User Redirected Walking in Shared Physical Spaces Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 91–98, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{azmandian_evaluation_2017,
title = {An Evaluation of Strategies for Two-User Redirected Walking in Shared Physical Spaces},
author = {Mahdi Azmandian and Timofey Grechkin and Evan Suma Rosenberg},
url = {http://ieeexplore.ieee.org/abstract/document/7892235/},
doi = {10.1109/VR.2017.7892235},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {91–98},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {As the focus of virtual reality technology is shifting from single-person experiences to multi-user interactions, it becomes increasingly important to accommodate multiple co-located users within a shared real-world space. For locomotion and navigation, the introduction of multiple users moving both virtually and physically creates additional challenges related to potential user-on-user collisions. In this work, we focus on defining the extent of these challenges, in order to apply redirected walking to two users immersed in virtual reality experiences within a shared physical tracked space. Using a computer simulation framework, we explore the costs and benefits of splitting available physical space between users versus attempting to algorithmically prevent user-to-user collisions. We also explore fundamental components of collision prevention such as steering the users away from each other, forced stopping, and user re-orientation. Each component was analyzed for the number of potential disruptions to the flow of the virtual experience. We also develop a novel collision prevention algorithm that reduces overall interruptions by 17.6% and collision prevention events by 58.3%. Our results show that sharing space using our collision prevention method is superior to subdividing the tracked space.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Chih-Fan; Bolas, Mark; Rosenberg, Evan Suma
Rapid Creation of Photorealistic Virtual Reality Content with Consumer Depth Cameras Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 473–474, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{chen_rapid_2017,
title = {Rapid Creation of Photorealistic Virtual Reality Content with Consumer Depth Cameras},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma Rosenberg},
url = {http://ieeexplore.ieee.org/abstract/document/7892385/},
doi = {10.1109/VR.2017.7892385},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {473–474},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Virtual objects are essential for building environments in virtual reality (VR) applications. However, creating photorealistic 3D models is not easy, and handcrafting the detailed 3D model from a real object can be time and labor intensive. An alternative way is to build a structured camera array such as a light-stage to reconstruct the model from a real object. However, these technologies are very expensive and not practical for most users. In this work, we demonstrate a complete end-to-end pipeline for the capture, processing, and rendering of view-dependent 3D models in virtual reality from a single consumer-grade RGB-D camera. The geometry model and the camera trajectories are automatically reconstructed from an RGB-D image sequence captured offline. Based on the HMD position, selected images are used for real-time model rendering. The result of this pipeline is a 3D mesh with view-dependent textures suitable for real-time rendering in virtual reality. Specular reflections and light-burst effects are especially noticeable when users view the objects from different perspectives in a head-tracked environment.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from UAS-Collected Imagery},
author = {Ryan Spicer and Ryan McAlinden and Damon Conover},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created by the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, structures, roads and other surface features. Attributes such as slope, edge detection, and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serve as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning – collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {ARL, DoD, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {Ryan McAlinden and Sin-Hwa Kang and Benjamin Nye and Artemisa Phillips and Julia Campbell and Stephan L. Goldberg},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts on making content more interesting (i.e. the return on investment, or ROI). The paper presents findings from a 3-year-long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)) – the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions: engagement, learning, and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others cannot be underestimated, and that the familiar phrase – ‘you get what you pay for’ – is as true with online learning as it is with other areas of content design and software development.},
keywords = {ARL, DoD, Learning Sciences, MedVR, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, J. Adam; Krum, David M.; Bolas, Mark T.
Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments Journal Article
In: ACM Transactions on Applied Perception, vol. 14, no. 2, pp. 1–17, 2016, ISSN: 15443558.
@article{jones_vertical_2016,
title = {Vertical Field-of-View Extension and Walking Characteristics in Head-Worn Virtual Environments},
author = {J. Adam Jones and David M. Krum and Mark T. Bolas},
url = {http://dl.acm.org/citation.cfm?id=2983631},
doi = {10.1145/2983631},
issn = {15443558},
year = {2016},
date = {2016-10-01},
journal = {ACM Transactions on Applied Perception},
volume = {14},
number = {2},
pages = {1–17},
abstract = {In this article, we detail a series of experiments that examines the effect of vertical field-of-view extension and the addition of non-specific peripheral visual stimulation on gait characteristics and distance judgments in a head-worn virtual environment. Specifically, we examined four field-of-view configurations: a common 60° diagonal field of view (48° × 40°), a 60° diagonal field of view with the addition of a luminous white frame in the far periphery, a field of view with an extended upper edge, and a field of view with an extended lower edge. We found that extension of the field of view, either with spatially congruent or spatially non-informative visuals, resulted in improved distance judgments and changes in observed posture. However, these effects were not equal across all field-of-view configurations, suggesting that some configurations may be more appropriate than others when balancing performance, cost, and ergonomics.},
keywords = {MxR},
pubstate = {published},
tppubtype = {article}
}
Chen, Chih-Fan; Bolas, Mark; Suma, Evan
Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, pp. 1–2, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{chen_real-time_2016,
title = {Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2945162},
doi = {10.1145/2945078.2945162},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
pages = {1–2},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {With the recent proliferation of high-fidelity head-mounted displays (HMDs), there is increasing demand for realistic 3D content that can be integrated into virtual reality environments. However, creating photorealistic models is not only difficult but also time consuming. A simpler alternative involves scanning objects in the real world and rendering their digitized counterpart in the virtual world. Capturing objects can be achieved by performing a 3D scan using widely available consumer-grade RGB-D cameras. This process involves reconstructing the geometric model from depth images generated using a structured light or time-of-flight sensor. The colormap is determined by fusing data from multiple color images captured during the scan. Existing methods compute the color of each vertex by averaging the colors from all these images. Blending colors in this manner creates low-fidelity models that appear blurry (Figure 1, right). Furthermore, this approach also yields textures with fixed lighting that is baked on the model. This limitation becomes more apparent when viewed in head-tracked virtual reality, as the illumination (e.g. specular reflections) does not change appropriately based on the user's viewpoint.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18–26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Grechkin, Timofey; Thomas, Jerald; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Revisiting detection thresholds for redirected walking: combining translation and curvature gains Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, pp. 113–120, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4383-1.
@inproceedings{grechkin_revisiting_2016,
title = {Revisiting detection thresholds for redirected walking: combining translation and curvature gains},
author = {Timofey Grechkin and Jerald Thomas and Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2931018},
doi = {10.1145/2931002.2931018},
isbn = {978-1-4503-4383-1},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
pages = {113–120},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {Redirected walking enables the exploration of large virtual environments while requiring only a finite amount of physical space. Unfortunately, in living room sized tracked areas the effectiveness of common redirection algorithms such as Steer-to-Center is very limited. A potential solution is to increase redirection effectiveness by applying two types of perceptual manipulations (curvature and translation gains) simultaneously. This paper investigates how such a combination may affect detection thresholds for curvature gain. To this end we analyze the estimation methodology and discuss the selection process for a suitable estimation method. We then compare curvature detection thresholds obtained under different levels of translation gain using two different estimation methods: the method of constant stimuli and Green’s maximum likelihood procedure. The data from both experiments shows no evidence that curvature gain detection thresholds were affected by the presence of translation gain (with test levels spanning the previously estimated interval of undetectable translation gain levels). This suggests that in practice currently used levels of translation and curvature gains can be safely applied simultaneously. Furthermore, we present some evidence that curvature detection thresholds may be lower than previously reported. Our estimates indicate that users can be redirected on a circular arc with a radius of either 11.6m or 6.4m depending on the estimation method, vs. the previously reported value of 22m. These results highlight that the detection threshold estimates vary significantly with the estimation method and suggest the need for further studies to define an efficient and reliable estimation methodology.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Phan, Thai; Bolas, Mark; Krum, David M.
User Perceptions of a Virtual Human Over Mobile Video Chat Interactions Book Section
In: Human-Computer Interaction. Novel User Experiences, vol. 9733, pp. 107–118, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39512-8 978-3-319-39513-5.
@incollection{kang_user_2016,
title = {User Perceptions of a Virtual Human Over Mobile Video Chat Interactions},
author = {Sin-Hwa Kang and Thai Phan and Mark Bolas and David M. Krum},
url = {https://link.springer.com/chapter/10.1007/978-3-319-39513-5_10},
isbn = {978-3-319-39512-8 978-3-319-39513-5},
year = {2016},
date = {2016-06-01},
booktitle = {Human-Computer Interaction. Novel User Experiences},
volume = {9733},
pages = {107–118},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {We believe that virtual humans, presented over video chat services, such as Skype, and delivered using smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. To explore this subject, we have built a hardware and software apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we conducted two experiments to investigate the applications and characteristics of virtual humans that interact over mobile video. In Experiment 1, we investigated user reactions to the physical realism of the background scene in which a virtual human was displayed. In Experiment 2, we examined how virtual characters can establish and maintain longer-term relationships with users, using ideas from Social Exchange Theory to strengthen bonds between interactants. Experiment 2 involved repeated interactions with a virtual human over a period of time. Both studies used counseling-style interactions with users. The results demonstrated that males were more socially attracted to a virtual human presented over a realistic background than over a featureless background, while females were more socially attracted to a virtual human with a less realistic, featureless background. The results further revealed that users felt the virtual human was a compassionate partner when they interacted with the virtual human over multiple calls, rather than just a single call.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC, Virtual Humans
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121–129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing pipeline requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and needs approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
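The pipeline above produces a standard linear blendshape rig, which is evaluated as the neutral mesh plus a weighted sum of per-expression vertex offsets. A minimal NumPy sketch of that evaluation, with toy array sizes; the names and shapes are assumptions for illustration, not the paper's code.

    import numpy as np

    def blend(neutral, targets, weights):
        """neutral: (V, 3) neutral-pose vertices; targets: (K, V, 3) expression
        meshes registered to the same topology; weights: (K,) blend weights.
        Returns neutral plus the weighted sum of per-expression deltas."""
        deltas = targets - neutral[None, :, :]
        return neutral + np.tensordot(weights, deltas, axes=1)

    # Toy example: a 2-vertex mesh and two expressions at half and quarter weight.
    neutral = np.zeros((2, 3))
    targets = np.ones((2, 2, 3))
    print(blend(neutral, targets, np.array([0.5, 0.25])))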
Hutton, Courtney; Suma, Evan
A Realistic Walking Model for Enhancing Redirection in Virtual Reality Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 183–184, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{hutton_realistic_2016,
title = {A Realistic Walking Model for Enhancing Redirection in Virtual Reality},
author = {Courtney Hutton and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504714},
doi = {10.1109/VR.2016.7504714},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {183–184},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
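The model's key departure from straight-line waypoint interpolation is that heading changes are rate-limited rather than instantaneous. The following sketch shows the simplest walker of that kind, under assumed speed and turn-rate parameters; it illustrates the idea, not the paper's model.

    import math

    TURN_RATE = math.radians(90)  # assumed max heading change, radians/second
    SPEED = 1.0                   # assumed walking speed, m/s
    DT = 0.05                     # simulation tick, seconds

    def step(pos, heading, waypoint):
        """Advance the walker one tick toward `waypoint`, turning smoothly."""
        desired = math.atan2(waypoint[1] - pos[1], waypoint[0] - pos[0])
        # shortest signed heading error, clamped by the turn-rate limit
        diff = (desired - heading + math.pi) % (2 * math.pi) - math.pi
        heading += max(-TURN_RATE * DT, min(TURN_RATE * DT, diff))
        return (pos[0] + SPEED * DT * math.cos(heading),
                pos[1] + SPEED * DT * math.sin(heading)), heading

    pos, heading = (0.0, 0.0), 0.0
    for _ in range(100):
        pos, heading = step(pos, heading, (5.0, 5.0))
    print(pos)  # the walker curves onto the bearing instead of snapping to it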
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Head Mounted Projection for Enhanced Gaze in Social Interactions Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 209–210, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MedVR, MxR, UARC
@inproceedings{krum_head_2016,
title = {Head Mounted Projection for Enhanced Gaze in Social Interactions},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504727},
doi = {10.1109/VR.2016.7504727},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {209–210},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nilsson, Niels; Suma, Evan; Nordahl, Rolf; Bolas, Mark; Serafin, Stefania
Estimation of Detection Thresholds for Audiovisual Rotation Gains Proceedings Article
In: IEEE Virtual Reality 2016, pp. ID: A22, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{nilsson_estimation_2016,
title = {Estimation of Detection Thresholds for Audiovisual Rotation Gains},
author = {Niels Nilsson and Evan Suma and Rolf Nordahl and Mark Bolas and Stefania Serafin},
url = {http://ieeevr.org/2016/posters/},
year = {2016},
date = {2016-03-01},
booktitle = {IEEE Virtual Reality 2016},
pages = {ID: A22},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of visual stimuli. We describe a within-subjects study (n=31) exploring if participants’ ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
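A rotation gain, the manipulation whose detection thresholds are estimated above, simply scales the tracked physical rotation before it drives the virtual camera. A one-function sketch with an assumed gain value for illustration:

    def apply_rotation_gain(delta_physical_yaw_deg, gain=1.2):
        """Scale the user's physical head rotation before applying it to the
        virtual camera; gains near 1.0 typically go unnoticed."""
        return gain * delta_physical_yaw_deg

    # With a gain of 1.2, a 90-degree physical turn renders as a 108-degree
    # virtual turn, letting the system reorient the user in the tracked space.
    print(apply_rotation_gain(90.0))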
Phan, Thai; Krum, David M.; Bolas, Mark
ShodanVR: Immersive Visualization of Text Records from the Shodan Database Proceedings Article
In: Proceedings of the 2016 Workshop on Immersive Analytics (IA), IEEE, Greenville, SC, 2016, ISBN: 978-1-5090-0834-6.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{phan_shodanvr_2016,
title = {ShodanVR: Immersive Visualization of Text Records from the Shodan Database},
author = {Thai Phan and David M. Krum and Mark Bolas},
url = {http://ieeexplore.ieee.org/document/7932379/?part=1},
doi = {10.1109/IMMERSIVE.2016.7932379},
isbn = {978-1-5090-0834-6},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of the 2016 Workshop on Immersive Analytics (IA)},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {ShodanVR is an immersive visualization for querying and displaying text records from the Shodan database of Internet connected devices. Shodan provides port connection data retrieved from servers, routers, and other networked devices [2]. Cybersecurity professionals can glean this data for device populations, software versions, and potential security vulnerabilities [1].},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
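The records ShodanVR renders can be pulled with the official shodan Python library (pip install shodan). A minimal sketch, assuming a valid API key and an arbitrary example query; the immersive rendering step itself is beyond a few lines.

    import shodan

    api = shodan.Shodan("YOUR_API_KEY")   # placeholder; requires a real key
    results = api.search("apache")        # text banners for matching hosts
    for match in results["matches"][:5]:
        # each raw banner is the kind of text record the immersive view lays out
        print(match["ip_str"], match["port"], match["data"][:60])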
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments Proceedings Article
In: 2nd Workshop on Everyday Virtual Reality, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{azmandian_redirected_2016,
title = {The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://www.adalsimeone.me/papers/WEVR2016/WEVR2016_Azmandian.pdf},
year = {2016},
date = {2016-03-01},
booktitle = {2nd Workshop on Everyday Virtual Reality},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
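Steer-to-center, the canonical algorithm such a toolkit ships, injects a small yaw each step so the user's physical path bends toward the middle of the tracked space. A minimal sketch under assumed gain caps and sign conventions; the toolkit's actual implementation and API are not reproduced here.

    import math

    MAX_YAW_PER_METER = math.radians(3.0)  # assumed curvature cap, radians/meter

    def steer_to_center(pos, heading, step_len, center=(0.0, 0.0)):
        """Return the injected virtual yaw (radians) for one physical step."""
        to_center = math.atan2(center[1] - pos[1], center[0] - pos[0])
        diff = (to_center - heading + math.pi) % (2 * math.pi) - math.pi
        # clamp the correction so the redirection stays below detection limits
        return math.copysign(min(abs(diff), MAX_YAW_PER_METER * step_len), diff)

    # User 2 m east of center, facing east: a small corrective yaw is injected.
    print(steer_to_center((2.0, 0.0), 0.0, 0.7))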
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Automated Path Prediction for Redirected Walking Using Navigation Meshes Proceedings Article
In: 2016 IEEE Symposium on 3D User Interfaces (3DUI), pp. 63–66, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{azmandian_automated_2016,
title = {Automated Path Prediction for Redirected Walking Using Navigation Meshes},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7460032},
doi = {10.1109/3DUI.2016.7460032},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Symposium on 3D User Interfaces (3DUI)},
pages = {63–66},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. The effectiveness of redirection algorithms can improve significantly when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
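Once the environment is reduced to an annotation graph, predicting the user's future trajectory becomes a shortest-path query from their nearest node to a candidate goal. A generic sketch of that query; the toy graph and weights are assumptions, and the paper derives the real graph automatically from the navigation mesh.

    import heapq

    def shortest_path(graph, start, goal):
        """graph: {node: [(neighbor, cost), ...]}. Returns a node list or None."""
        frontier, seen = [(0.0, start, [start])], set()
        while frontier:
            cost, node, path = heapq.heappop(frontier)
            if node == goal:
                return path
            if node in seen:
                continue
            seen.add(node)
            for nxt, weight in graph.get(node, []):
                heapq.heappush(frontier, (cost + weight, nxt, path + [nxt]))
        return None

    graph = {"hall": [("atrium", 4.0)], "atrium": [("exit", 2.0)], "exit": []}
    print(shortest_path(graph, "hall", "exit"))  # ['hall', 'atrium', 'exit']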
2015
McAlinden, Ryan; Suma, Evan; Grechkin, Timofey; Enloe, Michael
Procedural Reconstruction of Simulation Terrain Using Drones Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
Abstract | Links | BibTeX | Tags: MxR, STG
@inproceedings{mcalinden_procedural_2015,
title = {Procedural Reconstruction of Simulation Terrain Using Drones},
author = {Ryan McAlinden and Evan Suma and Timofey Grechkin and Michael Enloe},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Photogrammetric techniques for constructing 3D virtual environments have previously been plagued by expensive equipment and by imprecise, visually unappealing results. However, with the introduction of low-cost, off-the-shelf (OTS) unmanned aerial systems (UAS), lighter and capable cameras, and more efficient software techniques for reconstruction, the modeling and simulation (M&S) community now has available to it new types of virtual assets that are suited for modern-day games and simulations. This paper presents an approach for fully autonomously collecting, processing, storing and rendering highly-detailed geo-specific terrain data using these OTS techniques and methods. We detail the types of equipment used, the flight parameters, the processing and reconstruction pipeline, and finally the results of using the dataset in a game/simulation engine. A key objective of the research is procedurally segmenting the terrain into usable features that the engine can interpret – i.e. distinguishing between roads, buildings, vegetation, etc. This allows the simulation core to assign attributes related to physics, lighting, collision cylinders and navigation meshes that not only support basic rendering of the model but introduce interaction with it. The results of this research are framed in the context of a new paradigm for geospatial collection, analysis and simulation. Specifically, the next generation of M&S systems will need to integrate environmental representations that have higher detail and richer metadata while ensuring a balance between performance and usability.},
keywords = {MxR, STG},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance Proceedings Article
In: Eurographics Symposium on Virtual Environments (2015), pp. 93–100, The Eurographics Association, Kyoto, Japan, 2015, ISBN: 978-3-905674-84-2.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{azmandian_physical_2015,
title = {Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {https://diglib.eg.org/handle/10.2312/13833},
doi = {10.2312/egve.20151315},
isbn = {978-3-905674-84-2},
year = {2015},
date = {2015-10-01},
booktitle = {Eurographics Symposium on Virtual Environments (2015)},
pages = {93–100},
publisher = {The Eurographics Association},
address = {Kyoto, Japan},
abstract = {Redirected walking provides a compelling solution to explore large virtual environments in a natural way. However, the research literature provides few guidelines regarding the trade-offs involved in selecting the size and layout of the physical tracked space. We designed a rigorously controlled benchmarking framework and conducted two simulated user experiments to systematically investigate how the total area and dimensions of the tracked space affect the performance of steer-to-center and steer-to-orbit algorithms. The results indicate that the minimum viable size of the physical tracked space for these redirected walking algorithms is approximately 6m × 6m, with performance continuously improving in larger tracked spaces. At the same time, no "optimal" tracked space size can guarantee the absence of contacts with the boundary. We also found that square tracked spaces enabled the best overall performance, with the steer-to-center algorithm also performing well in moderately elongated rectangular spaces. Finally, we demonstrate that introducing translation gains can provide a useful boost in performance, particularly when physical space is constrained. We conclude with a discussion of potential applications of our benchmarking toolkit to other problems related to the performance of redirected walking platforms.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
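The paper's benchmarking idea can be caricatured in a few lines: simulate a meandering walker in a W × D meter space and count boundary contacts (resets). The random-walk "user" and every constant below are illustrative assumptions, not the paper's simulator, but the trend of fewer resets in larger spaces mirrors the reported result.

    import math
    import random

    def benchmark(width, depth, steps=20000, step_len=0.1, seed=1):
        """Count boundary contacts for a meandering walker in a width x depth
        meter tracked space; each contact stands in for a disruptive reset."""
        rng = random.Random(seed)
        x = y = heading = 0.0
        resets = 0
        for _ in range(steps):
            heading += rng.uniform(-0.3, 0.3)
            x += step_len * math.cos(heading)
            y += step_len * math.sin(heading)
            if abs(x) > width / 2 or abs(y) > depth / 2:
                x = y = 0.0          # return the walker to center
                resets += 1
        return resets

    print(benchmark(4, 4), benchmark(6, 6))  # expect fewer resets at 6m x 6m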
Kang, Sin-Hwa; Krum, David M.; Phan, Thai; Bolas, Mark
"Hi, It's Me Again!": Virtual Coaches over Mobile Video Proceedings Article
In: Proceedings of the 3rd International Conference on Human-Agent Interaction, pp. 183–186, ACM, Daegu, Korea, 2015, ISBN: 978-1-4503-3527-0.
Abstract | Links | BibTeX | Tags: MedVR, MxR
@inproceedings{kang_hi_2015,
title = {"Hi, It's Me Again!": Virtual Coaches over Mobile Video},
author = {Sin-Hwa Kang and David M. Krum and Thai Phan and Mark Bolas},
url = {http://dl.acm.org/citation.cfm?id=2814970},
isbn = {978-1-4503-3527-0},
year = {2015},
date = {2015-10-01},
booktitle = {Proceedings of the 3rd International Conference on Human-Agent Interaction},
pages = {183–186},
publisher = {ACM},
address = {Daegu, Korea},
abstract = {We believe that virtual humans presented over video chat services, such as Skype via smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. We hypothesize that the context of a smartphone communication channel, i.e. how a virtual human is presented within a smartphone app, and indeed, the nature of that app, can profoundly affect how a real human perceives the virtual human. We have built an apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we are examining effective designs and social implications of virtual humans that interact over mobile video. The current study examines a relationship involving repeated counseling-style interactions with a virtual human, leveraging the virtual human’s ability to call and interact with a real human on multiple occasions over a period of time. The results and implications of this preliminary study suggest that repeated interactions may improve perceived social characteristics of the virtual human.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Honig, Wolfgang; Milanes, Christina; Scaria, Lisa; Phan, Thai; Bolas, Mark; Ayanian, Nora
Mixed Reality for Robotics Proceedings Article
In: 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5382–5387, IEEE, Hamburg, Germany, 2015.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{honig_mixed_2015,
title = {Mixed Reality for Robotics},
author = {Wolfgang Honig and Christina Milanes and Lisa Scaria and Thai Phan and Mark Bolas and Nora Ayanian},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7354138&tag=1},
doi = {10.1109/IROS.2015.7354138},
year = {2015},
date = {2015-09-01},
booktitle = {2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {5382–5387},
publisher = {IEEE},
address = {Hamburg, Germany},
abstract = {Mixed Reality can be a valuable tool for research and development in robotics. In this work, we refine the definition of Mixed Reality to accommodate seamless interaction between physical and virtual objects in any number of physical or virtual environments. In particular, we show that Mixed Reality can reduce the gap between simulation and implementation by enabling the prototyping of algorithms on a combination of physical and virtual objects, including robots, sensors, and humans. Robots can be enhanced with additional virtual capabilities, or can interact with humans without sharing physical space. We demonstrate Mixed Reality with three representative experiments, each of which highlights the advantages of our approach. We also provide a testbed for Mixed Reality with three different virtual robotics environments in combination with the Crazyflie 2.0 quadcopter.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
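The "seamless interaction" described above is, at its core, a shared-state loop: tracked physical poses drive virtual twins, and purely virtual objects are merged back into each robot's sensed world. A minimal sketch of that loop under assumed types; none of these names come from the paper or from the Crazyflie software stack.

    from dataclasses import dataclass

    @dataclass
    class Pose:
        x: float
        y: float
        z: float

    class MixedRealityBridge:
        """Mirror a tracked physical robot into a virtual scene and expose
        virtual-only obstacles to the robot as synthetic range readings."""
        def __init__(self, virtual_obstacles):
            self.virtual_obstacles = virtual_obstacles  # list of Pose
            self.twin_pose = Pose(0.0, 0.0, 0.0)

        def on_mocap_update(self, pose):
            self.twin_pose = pose  # physical motion drives the virtual twin

        def synthetic_ranges(self):
            p = self.twin_pose
            return [((o.x - p.x) ** 2 + (o.y - p.y) ** 2 + (o.z - p.z) ** 2) ** 0.5
                    for o in self.virtual_obstacles]

    bridge = MixedRealityBridge([Pose(1.0, 0.0, 0.5)])
    bridge.on_mocap_update(Pose(0.0, 0.0, 0.5))
    print(bridge.synthetic_ranges())  # [1.0]: the robot "sees" a virtual obstacle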