Publications
Search
Jones, Brennan; Xu, Yan; Li, Qisheng; Scherer, Stefan
Designing a Proactive Context-Aware AI Chatbot for People's Long-Term Goals Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–7, ACM, Honolulu HI USA, 2024, ISBN: 9798400703317.
@inproceedings{jones_designing_2024,
title = {Designing a Proactive Context-Aware {AI} Chatbot for People's Long-Term Goals},
author = {Brennan Jones and Yan Xu and Qisheng Li and Stefan Scherer},
url = {https://dl.acm.org/doi/10.1145/3613905.3650912},
doi = {10.1145/3613905.3650912},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-25},
booktitle = {Extended Abstracts of the {CHI} Conference on Human Factors in Computing Systems},
pages = {1--7},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Training Reinforcement Learning Agents to React to an Ambush for Military Simulations Journal Article
In: FLAIRS, vol. 37, 2024, ISSN: 2334-0762.
@article{aris_training_2024,
title = {Training Reinforcement Learning Agents to React to an Ambush for Military Simulations},
author = {Timothy Aris and Volkan Ustun and Rajay Kumar},
url = {https://journals.flvc.org/FLAIRS/article/view/135578},
doi = {10.32473/flairs.37.1.135578},
issn = {2334-0762},
year = {2024},
date = {2024-05-01},
urldate = {2024-08-13},
journal = {FLAIRS},
volume = {37},
abstract = {There is a need for realistic Opposing Forces (OPFOR) behavior in military training simulations. Current training simulations generally only have simple, non-adaptive behaviors, requiring human instructors to play the role of OPFOR in any complicated scenario. This poster addresses this need by focusing on a specific scenario: training reinforcement learning agents to react to an ambush. It proposes a novel way to check for occlusion algorithmically. It shows vector fields showing the agent’s actions through the course of a training run. It shows that a single agent switching between multiple goals is possible, at least in a simplified environment. Such an approach could reduce the need to develop different agents for different scenarios. Finally, it shows a competent agent trained on a simplified React to Ambush scenario, demonstrating the plausibility of a scaled-up version.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert Skip; Hartholt, Arno; Mozgai, Sharon
Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for PTSD Book Section
In: Rich, Grant J.; Kumar, V. K.; Farley, Frank H. (Ed.): Handbook of Media Psychology, pp. 187–213, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-56536-6 978-3-031-56537-3.
@incollection{rich_settling_2024,
title = {Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for {PTSD}},
author = {Albert Skip Rizzo and Arno Hartholt and Sharon Mozgai},
editor = {Grant J. Rich and V. K. Kumar and Frank H. Farley},
url = {https://link.springer.com/10.1007/978-3-031-56537-3_14},
doi = {10.1007/978-3-031-56537-3_14},
isbn = {978-3-031-56536-6 978-3-031-56537-3},
year = {2024},
date = {2024-04-01},
urldate = {2024-06-18},
booktitle = {Handbook of Media Psychology},
pages = {187--213},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 09257535.
@article{liu_effectiveness_2023,
title = {Effectiveness of {VR}-based training on improving occupants’ response and preparedness for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
doi = {10.1016/j.ssci.2023.106175},
issn = {0925-7535},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-22},
journal = {Safety Science},
volume = {164},
pages = {106175},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Zhu, Runhe; Becerik‐Gerber, Burcin; Lucas, Gale M.; Southers, Erroll G.
Be prepared: How training and emergency type affect evacuation behaviour Journal Article
In: Computer Assisted Learning, pp. jcal.12812, 2023, ISSN: 0266-4909, 1365-2729.
@article{liu_be_2023,
title = {Be prepared: How training and emergency type affect evacuation behaviour},
author = {Ruying Liu and Runhe Zhu and Burcin Becerik-Gerber and Gale M. Lucas and Erroll G. Southers},
url = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12812},
doi = {10.1111/jcal.12812},
issn = {0266-4909, 1365-2729},
year = {2023},
date = {2023-04-01},
urldate = {2023-08-22},
journal = {Journal of Computer Assisted Learning},
pages = {jcal.12812},
abstract = {Background
Video-based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video-based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video-based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video-based training.
Results and Conclusions
The results revealed that participants with video-based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self-efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video-based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video-based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video-based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video-based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video-based training.
Implications of study findings for practitioners
Video-based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
,
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration \& Development Environment ({RIDE})},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Hou, Yu; McCullough, Kyle; Prasad, Pratusha Bhuvana; Soibelman, Lucio
Ground material classification and for UAV-based photogrammetric 3D data A 2D-3D Hybrid Approach Journal Article
In: 2021.
@article{chen_ground_2021,
title = {Ground material classification and for {UAV}-based photogrammetric {3D} data A {2D}-{3D} Hybrid Approach},
author = {Meida Chen and Andrew Feng and Yu Hou and Kyle McCullough and Pratusha Bhuvana Prasad and Lucio Soibelman},
url = {https://arxiv.org/abs/2109.12221},
doi = {10.48550/ARXIV.2109.12221},
eprint = {2109.12221},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-27},
abstract = {In recent years, photogrammetry has been widely used in many areas to create photorealistic 3D virtual data representing the physical environment. The innovation of small unmanned aerial vehicles (sUAVs) has provided additional high-resolution imaging capabilities with low cost for mapping a relatively large area of interest. These cutting-edge technologies have caught the US Army and Navy's attention for the purpose of rapid 3D battlefield reconstruction, virtual training, and simulations. Our previous works have demonstrated the importance of information extraction from the derived photogrammetric data to create semantic-rich virtual environments (Chen et al., 2019). For example, an increase of simulation realism and fidelity was achieved by segmenting and replacing photogrammetric trees with game-ready tree models. In this work, we further investigated the semantic information extraction problem and focused on the ground material segmentation and object detection tasks. The main innovation of this work was that we leveraged both the original 2D images and the derived 3D photogrammetric data to overcome the challenges faced when using each individual data source. For ground material segmentation, we utilized an existing convolutional neural network architecture (i.e., 3DMV) which was originally designed for segmenting RGB-D sensed indoor data. We improved its performance for outdoor photogrammetric data by introducing a depth pooling layer in the architecture to take into consideration the distance between the source images and the reconstructed terrain model. To test the performance of our improved 3DMV, a ground truth ground material database was created using data from the One World Terrain (OWT) data repository. Finally, a workflow for importing the segmented ground materials into a virtual simulation scene was introduced, and visual results are reported in this paper.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Oxendine, Christopher; O'Banion, Matt; Wright, William; Irmischer, Ian; Fleming, Steven
Rapid Terrain Generation for GeoVisualization, Simulation, Mission Rehearsal, & Operations Journal Article
In: 2019 State and Future of GEOINT Report, pp. 5, 2019.
@article{oxendine_rapid_2019,
title = {Rapid Terrain Generation for {GeoVisualization}, Simulation, Mission Rehearsal, \& Operations},
author = {Christopher Oxendine and Matt O'Banion and William Wright and Ian Irmischer and Steven Fleming},
url = {https://digitalcommons.usmalibrary.org/usma_research_papers/151/},
year = {2019},
date = {2019-06-01},
journal = {2019 State and Future of GEOINT Report},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fleming, Steven D; O’Banion, Matt S; McAlinden, Ryan; Oxendine, Christopher; Wright, William; Irmischer, Ian
Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations Journal Article
In: Annual Report (State and Future of GEOINT), pp. 5, 2019.
@article{fleming_rapid_2019,
title = {Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal \& Operations},
author = {Steven D Fleming and Matt S O'Banion and Ryan McAlinden and Christopher Oxendine and William Wright and Ian Irmischer},
url = {http://trajectorymagazine.com/rapid-terrain-generation/},
year = {2019},
date = {2019-01-01},
journal = {Annual Report (State and Future of GEOINT)},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2024
Jones, Brennan; Xu, Yan; Li, Qisheng; Scherer, Stefan
Designing a Proactive Context-Aware AI Chatbot for People's Long-Term Goals Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–7, ACM, Honolulu HI USA, 2024, ISBN: 9798400703317.
Links | BibTeX | Tags: AI, Simulation
@inproceedings{jones_designing_2024,
title = {Designing a Proactive Context-Aware {AI} Chatbot for People's Long-Term Goals},
author = {Brennan Jones and Yan Xu and Qisheng Li and Stefan Scherer},
url = {https://dl.acm.org/doi/10.1145/3613905.3650912},
doi = {10.1145/3613905.3650912},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-25},
booktitle = {Extended Abstracts of the {CHI} Conference on Human Factors in Computing Systems},
pages = {1--7},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {AI, Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Training Reinforcement Learning Agents to React to an Ambush for Military Simulations Journal Article
In: FLAIRS, vol. 37, 2024, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: Simulation, VR
@article{aris_training_2024,
title = {Training Reinforcement Learning Agents to React to an Ambush for Military Simulations},
author = {Timothy Aris and Volkan Ustun and Rajay Kumar},
url = {https://journals.flvc.org/FLAIRS/article/view/135578},
doi = {10.32473/flairs.37.1.135578},
issn = {2334-0762},
year = {2024},
date = {2024-05-01},
urldate = {2024-08-13},
journal = {FLAIRS},
volume = {37},
abstract = {There is a need for realistic Opposing Forces (OPFOR) behavior in military training simulations. Current training simulations generally only have simple, non-adaptive behaviors, requiring human instructors to play the role of OPFOR in any complicated scenario. This poster addresses this need by focusing on a specific scenario: training reinforcement learning agents to react to an ambush. It proposes a novel way to check for occlusion algorithmically. It shows vector fields showing the agent’s actions through the course of a training run. It shows that a single agent switching between multiple goals is possible, at least in a simplified environment. Such an approach could reduce the need to develop different agents for different scenarios. Finally, it shows a competent agent trained on a simplified React to Ambush scenario, demonstrating the plausibility of a scaled-up version.},
keywords = {Simulation, VR},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert Skip; Hartholt, Arno; Mozgai, Sharon
Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for PTSD Book Section
In: Rich, Grant J.; Kumar, V. K.; Farley, Frank H. (Ed.): Handbook of Media Psychology, pp. 187–213, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-56536-6 978-3-031-56537-3.
Links | BibTeX | Tags: DTIC, MedVR, Simulation, VR
@incollection{rich_settling_2024,
title = {Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for {PTSD}},
author = {Albert Skip Rizzo and Arno Hartholt and Sharon Mozgai},
editor = {Grant J. Rich and V. K. Kumar and Frank H. Farley},
url = {https://link.springer.com/10.1007/978-3-031-56537-3_14},
doi = {10.1007/978-3-031-56537-3_14},
isbn = {978-3-031-56536-6 978-3-031-56537-3},
year = {2024},
date = {2024-04-01},
urldate = {2024-06-18},
booktitle = {Handbook of Media Psychology},
pages = {187--213},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC, MedVR, Simulation, VR},
pubstate = {published},
tppubtype = {incollection}
}
2023
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 09257535.
Links | BibTeX | Tags: DTIC, Simulation, UARC, virtual reality
@article{liu_effectiveness_2023,
title = {Effectiveness of {VR}-based training on improving occupants’ response and preparedness for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
doi = {10.1016/j.ssci.2023.106175},
issn = {0925-7535},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-22},
journal = {Safety Science},
volume = {164},
pages = {106175},
keywords = {DTIC, Simulation, UARC, virtual reality},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Zhu, Runhe; Becerik‐Gerber, Burcin; Lucas, Gale M.; Southers, Erroll G.
Be prepared: How training and emergency type affect evacuation behaviour Journal Article
In: Computer Assisted Learning, pp. jcal.12812, 2023, ISSN: 0266-4909, 1365-2729.
Abstract | Links | BibTeX | Tags: DTIC, Simulation, UARC
@article{liu_be_2023,
title = {Be prepared: How training and emergency type affect evacuation behaviour},
author = {Ruying Liu and Runhe Zhu and Burcin Becerik-Gerber and Gale M. Lucas and Erroll G. Southers},
url = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12812},
doi = {10.1111/jcal.12812},
issn = {0266-4909, 1365-2729},
year = {2023},
date = {2023-04-01},
urldate = {2023-08-22},
journal = {Journal of Computer Assisted Learning},
pages = {jcal.12812},
abstract = {Background
Video-based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video-based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video-based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video-based training.
Results and Conclusions
The results revealed that participants with video-based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self-efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video-based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video-based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video-based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video-based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video-based training.
Implications of study findings for practitioners
Video-based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.},
keywords = {DTIC, Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
,
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.
2021
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration \& Development Environment ({RIDE})},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Hou, Yu; McCullough, Kyle; Prasad, Pratusha Bhuvana; Soibelman, Lucio
Ground material classification and for UAV-based photogrammetric 3D data A 2D-3D Hybrid Approach Journal Article
In: 2021.
Abstract | Links | BibTeX | Tags: DTIC, Simulation, UARC
@article{chen_ground_2021,
title = {Ground material classification and for {UAV}-based photogrammetric {3D} data A {2D-3D} Hybrid Approach},
author = {Meida Chen and Andrew Feng and Yu Hou and Kyle McCullough and Pratusha Bhuvana Prasad and Lucio Soibelman},
url = {https://arxiv.org/abs/2109.12221},
doi = {10.48550/ARXIV.2109.12221},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-27},
abstract = {In recent years, photogrammetry has been widely used in many areas to create photorealistic 3D virtual data representing the physical environment. The innovation of small unmanned aerial vehicles (sUAVs) has provided additional high-resolution imaging capabilities with low cost for mapping a relatively large area of interest. These cutting-edge technologies have caught the US Army and Navy's attention for the purpose of rapid 3D battlefield reconstruction, virtual training, and simulations. Our previous works have demonstrated the importance of information extraction from the derived photogrammetric data to create semantic-rich virtual environments (Chen et al., 2019). For example, an increase of simulation realism and fidelity was achieved by segmenting and replacing photogrammetric trees with game-ready tree models. In this work, we further investigated the semantic information extraction problem and focused on the ground material segmentation and object detection tasks. The main innovation of this work was that we leveraged both the original 2D images and the derived 3D photogrammetric data to overcome the challenges faced when using each individual data source. For ground material segmentation, we utilized an existing convolutional neural network architecture (i.e., 3DMV) which was originally designed for segmenting RGB-D sensed indoor data. We improved its performance for outdoor photogrammetric data by introducing a depth pooling layer in the architecture to take into consideration the distance between the source images and the reconstructed terrain model. To test the performance of our improved 3DMV, a ground truth ground material database was created using data from the One World Terrain (OWT) data repository. Finally, a workflow for importing the segmented ground materials into a virtual simulation scene was introduced, and visual results are reported in this paper.},
keywords = {DTIC, Simulation, UARC},
pubstate = {published},
tppubtype = {article},
internal-note = {NOTE(review): title reads as garbled ("classification and for ... data A ...") -- confirm wording/punctuation against arXiv:2109.12221. @article has no journal field; if this is an arXiv-only preprint, consider eprint = {2109.12221} + eprinttype = {arXiv} instead of a journal}
}
2019
Oxendine, Christopher; O'Banion, Matt; Wright, William; Irmischer, Ian; Fleming, Steven
Rapid Terrain Generation for GeoVisualization, Simulation, Mission Rehearsal, & Operations Journal Article
In: 2019 State and Future of GEOINT Report, pp. 5, 2019.
Abstract | Links | BibTeX | Tags: Simulation
@article{oxendine_rapid_2019,
title = {Rapid Terrain Generation for {GeoVisualization}, Simulation, Mission Rehearsal, \& Operations},
author = {Christopher Oxendine and Matt O'Banion and William Wright and Ian Irmischer and Steven Fleming},
url = {https://digitalcommons.usmalibrary.org/usma_research_papers/151/},
year = {2019},
date = {2019-06-01},
journal = {2019 State and Future of {GEOINT} Report},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {Simulation},
pubstate = {published},
tppubtype = {article},
internal-note = {NOTE(review): bare & in title escaped as \&; camelCase GeoVisualization and acronym GEOINT braced to survive sentence-casing styles. Near-duplicate of fleming_rapid_2019 (same work, different author order/venue string) -- consider merging}
}
Fleming, Steven D; O’Banion, Matt S; McAlinden, Ryan; Oxendine, Christopher; Wright, William; Irmischer, Ian
Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations Journal Article
In: Annual Report (State and Future of GEOINT), pp. 5, 2019.
Abstract | Links | BibTeX | Tags: DoD, Simulation, STG
@article{fleming_rapid_2019,
title = {Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal \& Operations},
author = {Steven D Fleming and Matt S O'Banion and Ryan McAlinden and Christopher Oxendine and William Wright and Ian Irmischer},
url = {http://trajectorymagazine.com/rapid-terrain-generation/},
year = {2019},
date = {2019-01-01},
journal = {Annual Report (State and Future of {GEOINT})},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {DoD, Simulation, STG},
pubstate = {published},
tppubtype = {article},
internal-note = {NOTE(review): bare & in title escaped as \&; Unicode right-quote in O'Banion replaced with ASCII apostrophe (classic BibTeX is 8-bit and mis-sorts non-ASCII). Near-duplicate of oxendine_rapid_2019 (identical abstract/title) -- consider merging the two entries}
}