Publications
Liu, Ziming; Suen, Christine Wun Ki; Zou, Zhengbo; Chen, Meida; Shi, Yangming
Assessing Workers’ Operational Postures via Egocentric Camera Mapping Proceedings Article
In: Computing in Civil Engineering 2023, pp. 17–24, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8522-4.
@inproceedings{liu_assessing_2024,
title = {Assessing Workers’ Operational Postures via Egocentric Camera Mapping},
author = {Ziming Liu and Christine Wun Ki Suen and Zhengbo Zou and Meida Chen and Yangming Shi},
url = {https://ascelibrary.org/doi/10.1061/9780784485224.003},
doi = {10.1061/9780784485224.003},
isbn = {978-0-7844-8522-4},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-19},
booktitle = {Computing in Civil Engineering 2023},
pages = {17–24},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Narrative, STG},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: arXiv, 2023, (Version Number: 2).
@article{yu_transupr_2023,
title = {TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation},
author = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
url = {https://arxiv.org/abs/2302.08594},
doi = {10.48550/ARXIV.2302.08594},
year = {2023},
date = {2023-02-01},
urldate = {2023-08-24},
abstract = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {DTIC, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
@article{chen_3d_2020,
title = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
author = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
doi = {10.1061/(ASCE)CP.1943-5487.0000929},
issn = {0887-3801, 1943-5487},
year = {2020},
date = {2020-11-01},
journal = {Journal of Computing in Civil Engineering},
volume = {34},
number = {6},
abstract = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), pp. 13, ResearchGate, Orlando, FL, 2020.
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
pages = {13},
publisher = {ResearchGate},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with low-cost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras, and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot allow the sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segment Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
keywords = {Graphics, Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Andrew Feng and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1–4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; McAlinden, Ryan; Soibelman, Lucio
Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations Journal Article
In: Journal of Management in Engineering, vol. 36, no. 2, pp. 04019046, 2019, ISSN: 0742-597X, 1943-5479.
@article{chen_photogrammetric_2019,
title = {Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations},
author = {Meida Chen and Andrew Feng and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29ME.1943-5479.0000737},
doi = {10.1061/(ASCE)ME.1943-5479.0000737},
issn = {0742-597X, 1943-5479},
year = {2019},
date = {2019-11-01},
journal = {Journal of Management in Engineering},
volume = {36},
number = {2},
pages = {04019046},
abstract = {Photogrammetric techniques have dramatically improved over the last few years, enabling the creation of visually compelling three-dimensional (3D) meshes using unmanned aerial vehicle imagery. These high-quality 3D meshes have attracted notice from both academicians and industry practitioners in developing virtual environments and simulations. However, photogrammetric generated point clouds and meshes do not allow both user-level and system-level interaction because they do not contain the semantic information to distinguish between objects. Thus, segmenting generated point clouds and meshes and extracting the associated object information is a necessary step. A framework for point cloud and mesh classification and segmentation is presented in this paper. The proposed framework was designed considering photogrammetric data-quality issues and provides a novel way of extracting object information, including (1) individual tree locations and related features and (2) building footprints. Experiments were conducted to rank different point descriptors and evaluate supervised machine-learning algorithms for segmenting photogrammetric generated point clouds. The proposed framework was validated using data collected at the University of Southern California (USC) and the Muscatatuck Urban Training Center (MUTC). DOI: 10.1061/(ASCE) ME.1943-5479.0000737. © 2019 American Society of Civil Engineers.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Fleming, Steven D; O’Banion, Matt S; McAlinden, Ryan; Oxendine, Christopher; Wright, William; Irmischer, Ian
Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations Journal Article
In: Annual Report (State and Future of GEOINT), pp. 5, 2019.
@article{fleming_rapid_2019,
title = {Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations},
author = {Steven D Fleming and Matt S O’Banion and Ryan McAlinden and Christopher Oxendine and William Wright and Ian Irmischer},
url = {http://trajectorymagazine.com/rapid-terrain-generation/},
year = {2019},
date = {2019-01-01},
journal = {Annual Report (State and Future of GEOINT)},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {DoD, Simulation, STG},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Astani, Sonny; McAlinden, Ryan; Spicer, Ryan
Semantic Modeling of Outdoor Scenes for the Creation of Virtual Environments and Simulations Proceedings Article
In: Proceedings of the 52nd Hawaii International Conference on System Sciences, pp. 10, IEEE, Maui, Hawaii, 2019, ISBN: 978-0-9981331-2-6.
@inproceedings{chen_semantic_2019,
title = {Semantic Modeling of Outdoor Scenes for the Creation of Virtual Environments and Simulations},
author = {Meida Chen and Sonny Astani and Ryan McAlinden and Ryan Spicer},
url = {https://scholarspace.manoa.hawaii.edu/handle/10125/59634},
doi = {10.24251/HICSS.2019.236},
isbn = {978-0-9981331-2-6},
year = {2019},
date = {2019-01-01},
booktitle = {Proceedings of the 52nd Hawaii International Conference on System Sciences},
pages = {10},
publisher = {IEEE},
address = {Maui, Hawaii},
abstract = {Efforts from both academia and industry have adopted photogrammetric techniques to generate visually compelling 3D models for the creation of virtual environments and simulations. However, such generated meshes do not contain semantic information for distinguishing between objects. To allow both user- and system-level interaction with the meshes, and enhance the visual acuity of the scene, classifying the generated point clouds and associated meshes is a necessary step. This paper presents a point cloud/mesh classification and segmentation framework. The proposed framework provides a novel way of extracting object information – i.e., individual tree locations and related features while considering the data quality issues presented in a photogrammetric-generated point cloud. A case study has been conducted using data that were collected at the University of Southern California to evaluate the proposed framework.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Koc, Eyuphan; Shi, Zhuoya; Soibelman, Lucio
Proactive 2D model-based scan planning for existing buildings Journal Article
In: Automation in Construction, vol. 93, pp. 165–177, 2018, ISSN: 0926-5805.
@article{chen_proactive_2018,
title = {Proactive 2D model-based scan planning for existing buildings},
author = {Meida Chen and Eyuphan Koc and Zhuoya Shi and Lucio Soibelman},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0926580517310385},
doi = {10.1016/j.autcon.2018.05.010},
issn = {0926-5805},
year = {2018},
date = {2018-09-01},
journal = {Automation in Construction},
volume = {93},
pages = {165–177},
abstract = {Creating a building information model (BIM) is known to be valuable during the life-cycle of a building. In most cases, a BIM of an existing building either does not exist or is out of date. For existing buildings, an as-is BIM is needed to leverage the technology towards building life-cycle objectives. To create an as-is BIM, field surveying is a necessary task in collecting current building related information. Terrestrial laser scanners have been widely accepted as field surveying instruments due to their high level of accuracy. However, laser scanning is a time-consuming and labor-intensive process. Site revisiting and reworking of the scanning process is generally unavoidable because of inappropriate data collection processes. In this context, creating a scan plan before going to a job-site can improve the data collection process. In this study, the authors have proposed a 2D proactive scan-planning framework that includes three modules: an information-gathering module, a preparation module, and a searching module. In addition, three search algorithms — a greedy best-first search algorithm, a greedy search algorithm with a backtracking process, and a simulated annealing algorithm — were compared based on 64 actual building site drawings to identify strengths and limitations. The experimental results demonstrate that the greedy search algorithm with a backtracking process could be used to compute an initial scan plan and the simulated annealing algorithm could be used to further refine the initial scan plan. This paper will also introduce the results of a case study that deployed the proposed scan-planning framework. In the case study, the resulting 3D point cloud that was generated based on the proposed framework was compared with the 3D point cloud created with data collected through a planned scanning process performed by a scan technician.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Rosario, Dalton; Borel, Christoph; Conover, Damon; McAlinden, Ryan; Ortiz, Anthony; Shiver, Sarah; Simon, Blair
Small Drone Field Experiment: Data Collection & Processing Journal Article
In: NATO SET-241 Symposium, 2017.
@article{rosario_small_2017,
title = {Small Drone Field Experiment: Data Collection & Processing},
author = {Dalton Rosario and Christoph Borel and Damon Conover and Ryan McAlinden and Anthony Ortiz and Sarah Shiver and Blair Simon},
url = {https://arxiv.org/abs/1711.10693},
year = {2017},
date = {2017-11-01},
journal = {NATO SET-241 Symposium},
abstract = {Following an initiative formalized in April 2016—formally known as ARL West—between the U.S. Army Research Laboratory (ARL) and University of Southern California’s Institute for Creative Technologies (USC ICT), a field experiment was coordinated and executed in the summer of 2016 by ARL, USC ICT, and Headwall Photonics. The purpose was to image part of the USC main campus in Los Angeles, USA, using two portable COTS (commercial off the shelf) aerial drone solutions for data acquisition, for photogrammetry (3D reconstruction from images), and fusion of hyperspectral data with the recovered set of 3D point clouds representing the target area. The research aims for determining the viability of having a machine capable of segmenting the target area into key material classes (e.g., manmade structures, live vegetation, water) for use in multiple purposes, to include providing the user with a more accurate scene understanding and enabling the unsupervised automatic sampling of meaningful material classes from the target area for adaptive semi-supervised machine learning. In the latter, a target-set library may be used for automatic machine training with data of local material classes, as an example, to increase the prediction chances of machines recognizing targets. The field experiment and associated data post processing approach to correct for reflectance, geo-rectify, recover the area’s dense point clouds from images, register spectral with elevation properties of scene surfaces from the independently collected datasets, and generate the desired scene segmented maps are discussed. Lessons learned from the experience are also highlighted throughout the paper.},
keywords = {ARL, DoD, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Conover, Damon M.; Beidleman, Brittany; McAlinden, Ryan; Borel-Donohue, Christoph C.
Visualizing UAS-Collected Imagery Using Augmented Reality Proceedings Article
In: Proceedings of the Next-Generation Analyst V conference, pp. 102070C, SPIE, Anaheim, CA, 2017.
@inproceedings{conover_visualizing_2017,
title = {Visualizing UAS-Collected Imagery Using Augmented Reality},
author = {Damon M. Conover and Brittany Beidleman and Ryan McAlinden and Christoph C. Borel-Donohue},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2262864},
doi = {10.1117/12.2262864},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Next-Generation Analyst V conference},
pages = {102070C},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {One of the areas where augmented reality will have an impact is in the visualization of 3-D data. 3-D data has traditionally been viewed on a 2-D screen, which has limited its utility. Augmented reality head-mounted displays, such as the Microsoft HoloLens, make it possible to view 3-D data overlaid on the real world. This allows a user to view and interact with the data in ways similar to how they would interact with a physical 3-D object, such as moving, rotating, or walking around it. A type of 3-D data that is particularly useful for military applications is geo-specific 3-D terrain data, and the visualization of this data is critical for training, mission planning, intelligence, and improved situational awareness. Advances in Unmanned Aerial Systems (UAS), photogrammetry software, and rendering hardware have drastically reduced the technological and financial obstacles in collecting aerial imagery and in generating 3-D terrain maps from that imagery. Because of this, there is an increased need to develop new tools for the exploitation of 3-D data. We will demonstrate how the HoloLens can be used as a tool for visualizing 3-D terrain data. We will describe: 1) how UAS-collected imagery is used to create 3-D terrain maps, 2) how those maps are deployed to the HoloLens, 3) how a user can view and manipulate the maps, and 4) how multiple users can view the same virtual 3-D object at the same time.},
keywords = {ARL, DoD, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from UAS-Collected Imagery},
author = {Ryan Spicer and Ryan McAlinden and Damon Conover},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created by the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, structures, roads and other surface features. Attributes such as slope and edge detection and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serves as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning – collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, and as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {ARL, DoD, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {Ryan McAlinden and Sin-Hwa Kang and Benjamin Nye and Artemisa Phillips and Julia Campbell and Stephan L. Goldberg},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts on making content more interesting (i.e. the return on investment, or ROI). The paper presents findings from a 3-year long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)) – the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions– engagement, learning and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others cannot be underestimated, and that the familiar phrase – ‘you get what you pay for’ is as true with online learning as it is with other areas of content design and software development.},
keywords = {ARL, DoD, Learning Sciences, MedVR, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Suma, Evan; Grechkin, Timofey; Enloe, Michael
Procedural Reconstruction of Simulation Terrain Using Drones Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{mcalinden_procedural_2015,
title = {Procedural Reconstruction of Simulation Terrain Using Drones},
author = {Ryan McAlinden and Evan Suma and Timofey Grechkin and Michael Enloe},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Photogrammetric techniques for constructing 3D virtual environments have previously been plagued by expensive equipment, imprecise and visually unappealing results. However, with the introduction of low-cost, off-the-shelf (OTS) unmanned aerial systems (UAS), lighter and capable cameras, and more efficient software techniques for reconstruction, the modeling and simulation (M&S) community now has available to it new types of virtual assets that are suited for modern-day games and simulations. This paper presents an approach for fully autonomously collecting, processing, storing and rendering highly-detailed geo-specific terrain data using these OTS techniques and methods. We detail the types of equipment used, the flight parameters, the processing and reconstruction pipeline, and finally the results of using the dataset in a game/simulation engine. A key objective of the research is procedurally segmenting the terrain into usable features that the engine can interpret – i.e. distinguishing between roads, buildings, vegetation, etc. This allows the simulation core to assign attributes related to physics, lighting, collision cylinders and navigation meshes that not only support basic rendering of the model but introduce interaction with it. The results of this research are framed in the context of a new paradigm for geospatial collection, analysis and simulation. Specifically, the next generation of M&S systems will need to integrate environmental representations that have higher detail and richer metadata while ensuring a balance between performance and usability.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Pynadath, David V.; Hill, Randall W.
UrbanSim: Using Social Simulation to Train for Stability Operations Book Section
In: Understanding Megacities with the Reconnaissance, Surveillance, and Intelligence Paradigm, 2014.
@incollection{mcalinden_urbansim_2014,
title = {UrbanSim: Using Social Simulation to Train for Stability Operations},
author = {Ryan McAlinden and David V. Pynadath and Randall W. Hill},
url = {http://ict.usc.edu/pubs/UrbanSim%20-%20Using%20Social%20Simulation%20to%20Train%20for%20Stability%20Operations.pdf},
year = {2014},
date = {2014-04-01},
booktitle = {Understanding Megacities with the Reconnaissance, Surveillance, and Intelligence Paradigm},
abstract = {As the United States reorients itself towards to a period of reduced military capacity and away from large‐footprint military engagements, there is an imperative to keep commanders and decision‐makers mentally sharp and prepared for the next ‘hot spot.’ One potential hot spot, megacities, presents a unique set of challenges due to their expansive, often interwoven ethnographic landscapes, and their overall lack of understanding by many western experts. Social simulation using agent‐based models is one approach for furthering our understanding of distant societies and their security implications, and for preparing leaders to engage these populations if and when the need arises. Over the past ten years, the field of social simulation has become decidedly cross‐discipline, including academics and practitioners from the fields of sociology, anthropology, psychology, artificial intelligence and engineering. This has led to an unparalleled advancement in social simulation theory and practice, and as new threats evolve to operate within dense but expansive urban environments, social simulation has a unique opportunity to shape our perspectives and develop knowledge that may otherwise be difficult to obtain. This article presents a social simulation‐based training application (UrbanSim) developed by the University of Southern California’s Institute for Creative Technologies (USC‐ICT) in partnership with the US Army’s School for Command Preparation (SCP). UrbanSim has been in‐use since 2009 to help Army commanders understand and train for missions in complex, uncertain environments. The discussion describes how the social simulation‐based training application was designed to develop and hone commanders' skills for conducting missions in environs with multifaceted social, ethnic and political fabrics. We present a few considerations when attempting to recreate dense, rapidly growing population centers, and how the integration of real‐world data into social simulation frameworks can add a level of realism and understanding not possible even a few years ago.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Filter
2024
Liu, Ziming; Suen, Christine Wun Ki; Zou, Zhengbo; Chen, Meida; Shi, Yangming
Assessing Workers’ Operational Postures via Egocentric Camera Mapping Proceedings Article
In: Computing in Civil Engineering 2023, pp. 17–24, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8522-4.
Links | BibTeX | Tags: Narrative, STG
@inproceedings{liu_assessing_2024,
title = {Assessing Workers’ Operational Postures via Egocentric Camera Mapping},
author = {Ziming Liu and Christine Wun Ki Suen and Zhengbo Zou and Meida Chen and Yangming Shi},
url = {https://ascelibrary.org/doi/10.1061/9780784485224.003},
doi = {10.1061/9780784485224.003},
isbn = {978-0-7844-8522-4},
year = {2024},
date = {2024-01-01},
urldate = {2024-03-19},
booktitle = {Computing in Civil Engineering 2023},
pages = {17–24},
publisher = {American Society of Civil Engineers},
address = {Corvallis, Oregon},
keywords = {Narrative, STG},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: DTIC, STG, UARC
@article{yu_transupr_2023,
title = {TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation},
author = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
url = {https://arxiv.org/abs/2302.08594},
doi = {10.48550/ARXIV.2302.08594},
year = {2023},
date = {2023-02-01},
urldate = {2023-08-24},
abstract = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {DTIC, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
2020
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@article{chen_3d_2020,
title = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
author = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
doi = {10.1061/(ASCE)CP.1943-5487.0000929},
issn = {0887-3801, 1943-5487},
year = {2020},
date = {2020-11-01},
journal = {Journal of Computing in Civil Engineering},
volume = {34},
number = {6},
abstract = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC),, pp. 13, ResearchGate, Orlando, FL, 2020.
Abstract | Links | BibTeX | Tags: Graphics, Narrative, STG, UARC
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC),},
pages = {13},
publisher = {ResearchGate},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with lowcost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras, and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot allow the sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segment Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
keywords = {Graphics, Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Andrew Feng and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1–4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; McAlinden, Ryan; Soibelman, Lucio
Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations Journal Article
In: Journal of Management in Engineering, vol. 36, no. 2, pp. 04019046, 2019, ISSN: 0742-597X, 1943-5479.
Abstract | Links | BibTeX | Tags: STG, UARC
@article{chen_photogrammetric_2019,
title = {Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations},
author = {Meida Chen and Andrew Feng and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29ME.1943-5479.0000737},
doi = {10.1061/(ASCE)ME.1943-5479.0000737},
issn = {0742-597X, 1943-5479},
year = {2019},
date = {2019-11-01},
journal = {Journal of Management in Engineering},
volume = {36},
number = {2},
pages = {04019046},
abstract = {Photogrammetric techniques have dramatically improved over the last few years, enabling the creation of visually compelling three-dimensional (3D) meshes using unmanned aerial vehicle imagery. These high-quality 3D meshes have attracted notice from both academicians and industry practitioners in developing virtual environments and simulations. However, photogrammetric generated point clouds and meshes do not allow both user-level and system-level interaction because they do not contain the semantic information to distinguish between objects. Thus, segmenting generated point clouds and meshes and extracting the associated object information is a necessary step. A framework for point cloud and mesh classification and segmentation is presented in this paper. The proposed framework was designed considering photogrammetric data-quality issues and provides a novel way of extracting object information, including (1) individual tree locations and related features and (2) building footprints. Experiments were conducted to rank different point descriptors and evaluate supervised machine-learning algorithms for segmenting photogrammetric generated point clouds. The proposed framework was validated using data collected at the University of Southern California (USC) and the Muscatatuck Urban Training Center (MUTC). DOI: 10.1061/(ASCE) ME.1943-5479.0000737. © 2019 American Society of Civil Engineers.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Fleming, Steven D; O’Banion, Matt S; McAlinden, Ryan; Oxendine, Christopher; Wright, William; Irmischer, Ian
Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations Journal Article
In: Annual Report (State and Future of GEOINT), pp. 5, 2019.
Abstract | Links | BibTeX | Tags: DoD, Simulation, STG
@article{fleming_rapid_2019,
title = {Rapid Terrain Generation for Geovisualization, Simulation, Mission Rehearsal & Operations},
author = {Steven D Fleming and Matt S O’Banion and Ryan McAlinden and Christopher Oxendine and William Wright and Ian Irmischer},
url = {http://trajectorymagazine.com/rapid-terrain-generation/},
year = {2019},
date = {2019-01-01},
journal = {Annual Report (State and Future of GEOINT)},
pages = {5},
abstract = {Geospecific 3D terrain representation (aka reality modeling) is revolutionizing geovisualization, simulation, and engineering practices around the world. In tandem with the rapid growth in unmanned aerial systems (UAS) and small satellites, reality modeling advancements now allow geospatial intelligence (GEOINT) practitioners to generate three-dimensional models from a decentralized collection of digital images to meet mission needs in both urban and rural environments. Scalable mesh models deliver enhanced, real-world visualization for engineers, geospatial teams, combatant, and combat support organizations. In this, reality modeling provides a detailed understanding of the physical environment, and models allow installation engineers and GEOINT practitioners to quickly generate updated, high-precision 3D reality meshes to provide real-world digital context for the decision-making process.},
keywords = {DoD, Simulation, STG},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Astani, Sonny; McAlinden, Ryan; Spicer, Ryan
Semantic Modeling of Outdoor Scenes for the Creation of Virtual Environments and Simulations Proceedings Article
In: Proceedings of the 52nd Hawaii International Conference on System Sciences, pp. 10, IEEE, Maui, Hawaii, 2019, ISBN: 978-0-9981331-2-6.
Abstract | Links | BibTeX | Tags: STG, UARC
@inproceedings{chen_semantic_2019,
title = {Semantic Modeling of Outdoor Scenes for the Creation of Virtual Environments and Simulations},
author = {Meida Chen and Sonny Astani and Ryan McAlinden and Ryan Spicer},
url = {https://scholarspace.manoa.hawaii.edu/handle/10125/59634},
doi = {10.24251/HICSS.2019.236},
isbn = {978-0-9981331-2-6},
year = {2019},
date = {2019-01-01},
booktitle = {Proceedings of the 52nd Hawaii International Conference on System Sciences},
pages = {10},
publisher = {IEEE},
address = {Maui, Hawaii},
abstract = {Efforts from both academia and industry have adopted photogrammetric techniques to generate visually compelling 3D models for the creation of virtual environments and simulations. However, such generated meshes do not contain semantic information for distinguishing between objects. To allow both user- and system-level interaction with the meshes, and enhance the visual acuity of the scene, classifying the generated point clouds and associated meshes is a necessary step. This paper presents a point cloud/mesh classification and segmentation framework. The proposed framework provides a novel way of extracting object information – i.e., individual tree locations and related features while considering the data quality issues presented in a photogrammetric-generated point cloud. A case study has been conducted using data that were collected at the University of Southern California to evaluate the proposed framework.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Chen, Meida; Koc, Eyuphan; Shi, Zhuoya; Soibelman, Lucio
Proactive 2D model-based scan planning for existing buildings Journal Article
In: Automation in Construction, vol. 93, pp. 165–177, 2018, ISSN: 09265805.
Abstract | Links | BibTeX | Tags: STG, UARC
@article{chen_proactive_2018,
title = {Proactive 2D model-based scan planning for existing buildings},
author = {Meida Chen and Eyuphan Koc and Zhuoya Shi and Lucio Soibelman},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0926580517310385},
doi = {10.1016/j.autcon.2018.05.010},
issn = {09265805},
year = {2018},
date = {2018-09-01},
journal = {Automation in Construction},
volume = {93},
pages = {165–177},
abstract = {Creating a building information model (BIM) is known to be valuable during the life-cycle of a building. In most cases, a BIM of an existing building either does not exist or is out of date. For existing buildings, an as-is BIM is needed to leverage the technology towards building life-cycle objectives. To create an as-is BIM, field surveying is a necessary task in collecting current building related information. Terrestrial laser scanners have been widely accepted as field surveying instruments due to their high level of accuracy. However, laser scanning is a timeconsuming and labor-intensive process. Site revisiting and reworking of the scanning process is generally unavoidable because ofinappropriate datacollection processes. In thiscontext, creatinga scanplan beforegoing to a job-site can improve the data collection process. In this study, the authors have proposed a 2D proactive scanplanning frameworkthatincludesthreemodules: aninformation-gathering module,apreparation module,anda searching module. In addition, three search algorithms — a greedy best-first search algorithm, a greedy search algorithm with a backtracking process, and a simulated annealing algorithm — were compared based on 64 actual building site drawings to identify strength and limitations. The experimental results demonstrate that the greedy search algorithm with a backtracking process could be used to compute an initial scan plan and the simulated annealing algorithm couldbe used tofurther refinethe initial scanplan. This paperwill alsointroduce the results of a case study that deployed the proposed scan-planning framework. In the case study, the resulting 3D-point cloud that was generated based on the proposed framework was compared with the 3D point cloud created with data collected through a planned scanning process performed by a scan technician.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
2017
Rosario, Dalton; Borel, Christoph; Conover, Damon; McAlinden, Ryan; Ortiz, Anthony; Shiver, Sarah; Simon, Blair
Small Drone Field Experiment: Data Collection & Processing Journal Article
In: NATO SET-241 Symposium, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, STG, UARC
@article{rosario_small_2017,
title = {Small Drone Field Experiment: Data Collection & Processing},
author = {Dalton Rosario and Christoph Borel and Damon Conover and Ryan McAlinden and Anthony Ortiz and Sarah Shiver and Blair Simon},
url = {https://arxiv.org/abs/1711.10693},
year = {2017},
date = {2017-11-01},
journal = {NATO SET-241 Symposium},
abstract = {Following an initiative formalized in April 2016—formally known as ARL West—between the U.S. Army Research Laboratory (ARL) and University of Southern California’s Institute for Creative Technologies (USC ICT), a field experiment was coordinated and executed in the summer of 2016 by ARL, USC ICT, and Headwall Photonics. The purpose was to image part of the USC main campus in Los Angeles, USA, using two portable COTS (commercial off the shelf) aerial drone solutions for data acquisition, for photogrammetry (3D reconstruction from images), and fusion of hyperspectral data with the recovered set of 3D point clouds representing the target area. The research aims for determining the viability of having a machine capable of segmenting the target area into key material classes (e.g., manmade structures, live vegetation, water) for use in multiple purposes, to include providing the user with a more accurate scene understanding and enabling the unsupervised automatic sampling of meaningful material classes from the target area for adaptive semi-supervised machine learning. In the latter, a target-set library may be used for automatic machine training with data of local material classes, as an example, to increase the prediction chances of machines recognizing targets. The field experiment and associated data post processing approach to correct for reflectance, geo-rectify, recover the area’s dense point clouds from images, register spectral with elevation properties of scene surfaces from the independently collected datasets, and generate the desired scene segmented maps are discussed. Lessons learned from the experience are also highlighted throughout the paper.},
keywords = {ARL, DoD, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Conover, Damon M.; Beidleman, Brittany; McAlinden, Ryan; Borel-Donohue, Christoph C.
Visualizing UAS-Collected Imagery Using Augmented Reality Proceedings Article
In: Proceedings of the Next-Generation Analyst V conference, pp. 102070C, SPIE, Anaheim, CA, 2017.
Abstract | Links | BibTeX | Tags: ARL, DoD, STG, UARC
@inproceedings{conover_visualizing_2017,
title = {Visualizing UAS-Collected Imagery Using Augmented Reality},
author = {Damon M. Conover and Brittany Beidleman and Ryan McAlinden and Christoph C. Borel-Donohue},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2262864},
doi = {10.1117/12.2262864},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the Next-Generation Analyst V conference},
pages = {102070C},
publisher = {SPIE},
address = {Anaheim, CA},
abstract = {One of the areas where augmented reality will have an impact is in the visualization of 3-D data. 3-D data has traditionally been viewed on a 2-D screen, which has limited its utility. Augmented reality head-mounted displays, such as the Microsoft HoloLens, make it possible to view 3-D data overlaid on the real world. This allows a user to view and interact with the data in ways similar to how they would interact with a physical 3-D object, such as moving, rotating, or walking around it. A type of 3-D data that is particularly useful for military applications is geo-specific 3-D terrain data, and the visualization of this data is critical for training, mission planning, intelligence, and improved situational awareness. Advances in Unmanned Aerial Systems (UAS), photogrammetry software, and rendering hardware have drastically reduced the technological and financial obstacles in collecting aerial imagery and in generating 3-D terrain maps from that imagery. Because of this, there is an increased need to develop new tools for the exploitation of 3-D data. We will demonstrate how the HoloLens can be used as a tool for visualizing 3-D terrain data. We will describe: 1) how UAS-collected imagery is used to create 3-D terrain maps, 2) how those maps are deployed to the HoloLens, 3) how a user can view and manipulate the maps, and 4) how multiple users can view the same virtual 3-D object at the same time.},
keywords = {ARL, DoD, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, STG, UARC
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from UAS-Collected Imagery},
author = {Ryan Spicer and Ryan McAlinden and Damon Conover},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created by the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, structures, roads and other surface features. Attributes such as slope and edge detection and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serves as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning – collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, and as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {ARL, DoD, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
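The engine-agnostic export step described in the abstract above (a georeferenced GeoTIFF DEM plus OBJ/FBX meshes) can be illustrated with a short sketch. The following Python is a hypothetical, minimal example only: the rasterio library, the UTM coordinate system, the 10 cm cell size, and all file names and values are assumptions for illustration, not details taken from the published pipeline.

import numpy as np
import rasterio
from rasterio.transform import from_origin

# Placeholder elevation grid (meters); a real pipeline would rasterize the ground-labeled points.
elevation = (np.random.rand(512, 512) * 50.0).astype("float32")

# Georeference the grid: hypothetical UTM zone 11N origin, 10 cm cells.
transform = from_origin(478000.0, 3763000.0, 0.10, 0.10)

with rasterio.open(
    "terrain_dem.tif", "w", driver="GTiff",
    height=elevation.shape[0], width=elevation.shape[1],
    count=1, dtype="float32", crs="EPSG:32611", transform=transform,
) as dst:
    dst.write(elevation, 1)  # band 1 holds the DEM

# Minimal OBJ export for a segmented object mesh (vertices plus one triangular face).
vertices = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 2.5)]  # placeholder geometry
faces = [(1, 2, 3)]  # OBJ face indices are 1-based
with open("structure.obj", "w") as obj:
    for x, y, z in vertices:
        obj.write(f"v {x} {y} {z}\n")
    for a, b, c in faces:
        obj.write(f"f {a} {b} {c}\n")

An engine can then ingest the GeoTIFF as heightmap terrain and the mesh files as static objects, which is the in-engine representation the paper refers to.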
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {Ryan McAlinden and Sin-Hwa Kang and Benjamin Nye and Artemisa Phillips and Julia Campbell and Stephan L. Goldberg},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts of making content more interesting (i.e., the return on investment, or ROI). The paper presents findings from a 3-year-long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)): the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary, dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions: engagement, learning, and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others should not be underestimated, and that the familiar phrase ‘you get what you pay for’ is as true of online learning as it is of other areas of content design and software development.},
keywords = {ARL, DoD, Learning Sciences, MedVR, MxR, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
McAlinden, Ryan; Suma, Evan; Grechkin, Timofey; Enloe, Michael
Procedural Reconstruction of Simulation Terrain Using Drones Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{mcalinden_procedural_2015,
title = {Procedural Reconstruction of Simulation Terrain Using Drones},
author = {Ryan McAlinden and Evan Suma and Timofey Grechkin and Michael Enloe},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Photogrammetric techniques for constructing 3D virtual environments have previously been plagued by expensive equipment and by imprecise, visually unappealing results. However, with the introduction of low-cost, off-the-shelf (OTS) unmanned aerial systems (UAS), lighter and more capable cameras, and more efficient software techniques for reconstruction, the modeling and simulation (M&S) community now has access to new types of virtual assets suited to modern-day games and simulations. This paper presents an approach for fully autonomously collecting, processing, storing, and rendering highly-detailed, geo-specific terrain data using these OTS techniques and methods. We detail the types of equipment used, the flight parameters, the processing and reconstruction pipeline, and finally the results of using the dataset in a game/simulation engine. A key objective of the research is procedurally segmenting the terrain into usable features that the engine can interpret – i.e., distinguishing between roads, buildings, vegetation, etc. This allows the simulation core to assign attributes related to physics, lighting, collision cylinders, and navigation meshes that not only support basic rendering of the model but also introduce interaction with it. The results of this research are framed in the context of a new paradigm for geospatial collection, analysis, and simulation. Specifically, the next generation of M&S systems will need to integrate environmental representations that have higher detail and richer metadata while ensuring a balance between performance and usability.},
keywords = {MxR, STG},
pubstate = {published},
tppubtype = {inproceedings}
}
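To make the procedural segmentation idea in the abstract above more concrete, here is a small, hypothetical Python sketch that separates ground from above-ground points by comparing each point's height to the lowest point in its grid cell. The cell size, tolerance, and synthetic data are illustrative assumptions only; the paper's actual pipeline also uses slope, edge, and color cues and is not reproduced here.

import numpy as np

def segment_ground(points, cell=1.0, height_tol=0.3):
    """points: (N, 3) array of x, y, z. Returns a boolean mask, True where a point is labeled ground."""
    ij = np.floor(points[:, :2] / cell).astype(np.int64)      # 2D grid cell index per point
    _, inverse = np.unique(ij, axis=0, return_inverse=True)   # map each point to its occupied cell
    inverse = inverse.ravel()
    cell_min = np.full(inverse.max() + 1, np.inf)
    np.minimum.at(cell_min, inverse, points[:, 2])             # lowest elevation within each cell
    return points[:, 2] <= cell_min[inverse] + height_tol      # near the local minimum -> ground

# Synthetic example: a flat ground plane plus scattered higher "vegetation" points.
rng = np.random.default_rng(0)
ground = np.column_stack([rng.uniform(0, 50, 5000), rng.uniform(0, 50, 5000), rng.normal(0, 0.05, 5000)])
veg = np.column_stack([rng.uniform(0, 50, 500), rng.uniform(0, 50, 500), rng.uniform(1, 8, 500)])
cloud = np.vstack([ground, veg])
mask = segment_ground(cloud)
print(f"{int(mask.sum())} of {len(cloud)} points labeled ground")

In the paper's terms, points labeled above-ground would then be clustered into roads, buildings, and vegetation features, which the engine attributes with physics and navigation data.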
2014
McAlinden, Ryan; Pynadath, David V.; Hill, Randall W.
UrbanSim: Using Social Simulation to Train for Stability Operations Book Section
In: Understanding Megacities with the Reconnaissance, Surveillance, and Intelligence Paradigm, 2014.
@incollection{mcalinden_urbansim_2014,
title = {UrbanSim: Using Social Simulation to Train for Stability Operations},
author = {Ryan McAlinden and David V. Pynadath and Randall W. Hill},
url = {http://ict.usc.edu/pubs/UrbanSim%20-%20Using%20Social%20Simulation%20to%20Train%20for%20Stability%20Operations.pdf},
year = {2014},
date = {2014-04-01},
booktitle = {Understanding Megacities with the Reconnaissance, Surveillance, and Intelligence Paradigm},
abstract = {As the United States reorients itself toward a period of reduced military capacity and away from large‐footprint military engagements, there is an imperative to keep commanders and decision‐makers mentally sharp and prepared for the next ‘hot spot.’ One potential hot spot, megacities, presents a unique set of challenges due to their expansive, often interwoven ethnographic landscapes, and their overall lack of understanding by many western experts. Social simulation using agent‐based models is one approach for furthering our understanding of distant societies and their security implications, and for preparing leaders to engage these populations if and when the need arises. Over the past ten years, the field of social simulation has become decidedly cross‐disciplinary, including academics and practitioners from the fields of sociology, anthropology, psychology, artificial intelligence, and engineering. This has led to an unparalleled advancement in social simulation theory and practice, and as new threats evolve to operate within dense but expansive urban environments, social simulation has a unique opportunity to shape our perspectives and develop knowledge that may otherwise be difficult to obtain. This article presents a social simulation‐based training application (UrbanSim) developed by the University of Southern California’s Institute for Creative Technologies (USC‐ICT) in partnership with the US Army’s School for Command Preparation (SCP). UrbanSim has been in use since 2009 to help Army commanders understand and train for missions in complex, uncertain environments. The discussion describes how the social simulation‐based training application was designed to develop and hone commanders' skills for conducting missions in environs with multifaceted social, ethnic, and political fabrics. We present a few considerations when attempting to recreate dense, rapidly growing population centers, and how the integration of real‐world data into social simulation frameworks can add a level of realism and understanding not possible even a few years ago.},
keywords = {Social Simulation, STG, UARC},
pubstate = {published},
tppubtype = {incollection}
}